Example #1
    def __init__(self, name, rate_limiter):
        super(Leader, self).__init__(name,
                                     rate_limiter=rate_limiter,
                                     laser_range=[np.pi / 3.5, np.pi / 3.5],
                                     laser_dist=2)
        self._epsilon = 0.1
        self.leg_pub = rospy.Publisher('/' + name + '/legs',
                                       Marker,
                                       queue_size=5)
        self.centroid_pub = rospy.Publisher('/' + name + '/centroids',
                                            Marker,
                                            queue_size=5)
        self.v_pub = rospy.Publisher('/' + name + '/vel', Marker, queue_size=5)
        self.follow = None
        self._tf = TransformListener()
        self.last_vel = np.array([0., 0.])
        self.all_around_laser = Laser(name=name, laser_range=[np.pi, np.pi])
        self.last_legs = None

        # placeholder transition (f) and observation (g) callables for the UKF
        def f(state, noise):
            pass

        def g(state, noise):
            pass
        self.ukf = UnscentedKalmanFilter(f, g)
        self.kf = None
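# A minimal working transition/observation pair for pykalman (a placeholder
# sketch, not the original author's model) would return arrays, e.g.:
#
#     def f(state, noise):
#         return state + noise      # identity dynamics, additive noise
#
#     def g(state, noise):
#         return state + noise      # direct observation, additive noise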
Example #2
    def __init__(self, dimState, delay, arm, knoiseU, controller):
        '''
        Initializes the parameters used by the functions implemented below.

        Inputs:
            -dimState: dimension of the state; here the state corresponds
                       to the muscular activation vector U, int
            -delay: the delay with which the observation is given to the
                    filter, int
            -arm: arm model, class object
            -knoiseU: noise applied to the muscular activation vector U
            -controller: controller providing the motor commands, class object
        '''
        self.name = "UnscentedKalmanFilter"
        self.knoiseU = knoiseU
        self.dimState = dimState
        self.delay = delay
        self.arm = arm
        self.controller = controller

        #initialization of some parameters for the filter
        transition_covariance = np.eye(self.dimState) * 0.01
        initial_state_mean = np.zeros(self.dimState)
        observation_covariance = 1000 * np.eye(self.dimState)
        initial_state_covariance = np.eye(self.dimState)
        self.nextCovariance = np.eye(self.dimState) * 0.0001
        self.ukf = UnscentedKalmanFilter(self.transitionFunctionUKF,
                                         self.observationFunctionUKF,
                                         transition_covariance,
                                         observation_covariance,
                                         initial_state_mean,
                                         initial_state_covariance)
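# The two callables passed to the filter above are methods defined elsewhere
# in the class; a hypothetical sketch of the signatures pykalman expects
# (placeholder bodies, not the original model):
#
#     def transitionFunctionUKF(self, state, transition_noise):
#         return state + transition_noise
#
#     def observationFunctionUKF(self, state, observation_noise):
#         return state + observation_noise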
Example #3
    def __init__(self):
        initialState = np.zeros((6, 1))
        initialCov = np.diag(np.ones((6, 1))[:, 0])
        self.filter = UnscentedKalmanFilter(
            initial_state_mean=initialState,
            initial_state_covariance=initialCov,
            n_dim_obs=6)
Example #4
def test_kalman2():
    from pykalman import UnscentedKalmanFilter
    ukf = UnscentedKalmanFilter(lambda x, w: x + np.sin(w),
                                lambda x, v: x + v,
                                transition_covariance=0.1)
    (filtered_state_means, filtered_state_covariances) = ukf.filter([0, 1, 2])
    (smoothed_state_means, smoothed_state_covariances) = ukf.smooth([0, 1, 2])
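    # For reference: with this 1-D state, each call above returns a pair whose
    # first element (the means) has shape (3, 1) and whose second (the
    # covariances) has shape (3, 1, 1).
    print(filtered_state_means.shape, smoothed_state_means.shape)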
Example #5
def kalman_filter(seq):
    def f(state, noise):
        return state + np.sin(noise)

    def g(state, noise):
        return state + np.cos(noise)

    # kf = KalmanFilter(initial_state_mean=0, n_dim_obs=2)
    ukf = UnscentedKalmanFilter(f, g, 0.001)
    return ukf.smooth(seq)[0].T[0]
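# Hypothetical call on a short observation sequence; the result is a 1-D
# array of smoothed state means, one value per observation:
smoothed = kalman_filter([0.0, 0.5, 1.0, 0.5])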
Example #6
def kalman_feat(df, cols):
    for col in cols:
        ukf = UnscentedKalmanFilter(lambda x, w: x + np.sin(w),
                                    lambda x, v: x + v,
                                    observation_covariance=0.1)
        (filtered_state_means,
         filtered_state_covariances) = ukf.filter(df[col])
        (smoothed_state_means,
         smoothed_state_covariances) = ukf.smooth(df[col])
        df[col + "_UKFSMOOTH"] = smoothed_state_means.flatten()
        df[col + "_UKFFILTER"] = filtered_state_means.flatten()
    return df
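# Hypothetical usage on a toy DataFrame (the column name "signal" is made up):
import numpy as np
import pandas as pd

toy = pd.DataFrame({"signal": np.sin(np.linspace(0, 6, 50))})
toy = kalman_feat(toy, ["signal"])
print(toy.columns.tolist())  # ['signal', 'signal_UKFSMOOTH', 'signal_UKFFILTER']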
Example #7
def kalmanFilter(X):

    ukf = UnscentedKalmanFilter()

    X_fil = np.zeros(np.shape(X))

    for i in range(np.shape(X)[1]):
        X_fil[:, i] = ukf.smooth(X[:, i])[0].ravel()

    return X_fil
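# Hypothetical usage: each column of X is treated as an independent 1-D series.
X = np.random.randn(30, 3)
print(kalmanFilter(X).shape)  # (30, 3)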


#if __name__ == "__main__":
#
#    subject = 'qing_frontal_3.7'
#    X, y = fetchDataset(subject)
#
#    seq_x, seq_y = split_sequences(s, n_steps)
##    X_train, X_test, y_train, y_test = split_train_test(X, y)
Example #8
    def __init__(self, x, v, w):
        self.random_state = np.random.RandomState(0)
        self.transition_covariance = np.array([[0.5, 0, 0, 0, 0],
                                               [0, 1, 0, 0, 0],
                                               [0, 0, 0.1, 0, 0],
                                               [0, 0, 0, 0.001, 0],
                                               [0, 0, 0, 0, 0.001]])
        self.observation_covariance = np.array([[0.5, 0, 0, 0, 0],
                                                [0, 1, 0, 0, 0],
                                                [0, 0, 0.5, 0, 0],
                                                [0, 0, 0, 0.001, 0],
                                                [0, 0, 0, 0, 0.001]])
        self.initial_state_mean = [x, 0.1, v, np.pi / 2, w]
        # self.initial_state_mean = [0, 0, 20, 0, np.pi / 180]
        self.transition_state = self.initial_state_mean
        self.obs = self.initial_state_mean
        self.pre_parabola_param = [0, 0, 1.75]
        self.initial_state_covariance = np.array([[0.5, 0, 0, 0, 0],
                                                  [0, 0.02, 0, 0, 0],
                                                  [0, 0, 0.1, 0, 0],
                                                  [0, 0, 0, 0.001, 0],
                                                  [0, 0, 0, 0, 0.001]])
        self.T = 0.5
        self.estimate_state = [
            self.initial_state_mean, self.initial_state_covariance
        ]
        self.kf = UnscentedKalmanFilter(self.transition_function,
                                        self.observation_function,
                                        self.transition_covariance,
                                        self.observation_covariance,
                                        self.initial_state_mean,
                                        self.initial_state_covariance,
                                        random_state=self.random_state)
        self.timestamp = time.time()
Example #9
def kalman_filter(video_file, dep0, v0):
    # get feature points from video
    points, init_image = get_feature_points_from_video(video_file)
    i, o = feature_points_to_observation_and_initilization(points, v0, dep0)
    # number of feature points
    observations = np.asarray(o)
    n = observations.shape[1]
    print('initialization shape is ', i.shape)
    print('observation shape is ', observations.shape)
    # initialize the covariance and mean
    transition_covariance = np.eye(2*n+6)
    random_state = np.random.RandomState(0)
    observation_covariance = np.eye(n) + abs(random_state.randn(n, n) * 0.005)

    initial_state_mean = i
    covariance_init = random_state.randn(2*n+6, 2*n+6) * 0.2
    covariance_init[0:3, 0:3] = 0.005
    initial_state_covariance = np.eye(2*n+6) + abs(covariance_init)
    # set Unscented kalman filter
    kf = UnscentedKalmanFilter(
        transition_function, observation_function,
        transition_covariance, observation_covariance,
        initial_state_mean, initial_state_covariance,
        random_state=random_state
        )
    """
    kf = AdditiveUnscentedKalmanFilter(
        additive_transition_function, additive_observation_function,
        transition_covariance, observation_covariance,
        initial_state_mean, initial_state_covariance
        )
    """
    # get result
    filtered_state_estimates = kf.filter(observations)
    smoothed_state_estimates = kf.smooth(observations)
    return filtered_state_estimates, smoothed_state_estimates
Example #10
GPScord = []
for d in data:
    temp = d.split(',')
    temp2 = []
    # strip the leading/trailing characters left over from the raw string format
    temp2.append(float(temp[1][2:][:-1]))
    temp2.append(float(temp[2][2:][:-1]))
    temp2.append(float(temp[3][2:][:-2]))
    GPScord.append(temp2)
#for record in GPScord:
#	print record
from pykalman import KalmanFilter
from pykalman import UnscentedKalmanFilter
#kf = KalmanFilter(n_dim_obs=3)
kf = KalmanFilter(transition_matrices=[[1, 1], [0, 1]],
                  observation_matrices=[[0.1, 0.5], [-0.3, 0.0]])
kf2 = UnscentedKalmanFilter()
measurements = GPScord
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
lat = []
lon = []
alt = []
for sample in GPScord:
    lat.append(sample[0])
    lon.append(sample[1])
    alt.append(sample[2])
ax.scatter(lat, lon, alt, c='r', marker='o', s=10)
ax.plot(lat, lon, alt)
ax.set_xlabel('X Label')
Example #11
def additive_observation_function(state):
    return observation_function(state, np.array([0, 0]))


transition_covariance = np.eye(2)
random_state = np.random.RandomState(0)
observation_covariance = np.eye(2) + random_state.randn(2, 2) * 0.1
initial_state_mean = [0, 0]
initial_state_covariance = [[1, 0.1], [0.1, 1]]

# sample from model
ukf = UnscentedKalmanFilter(transition_function,
                            observation_function,
                            transition_covariance,
                            observation_covariance,
                            initial_state_mean,
                            initial_state_covariance,
                            random_state=random_state)
akf = AdditiveUnscentedKalmanFilter(additive_transition_function,
                                    additive_observation_function,
                                    transition_covariance,
                                    observation_covariance, initial_state_mean,
                                    initial_state_covariance)
states, observations = ukf.sample(50, initial_state_mean)

# estimate state with filtering
ukf_state_estimates = ukf.filter(observations)[0]
akf_state_estimates = akf.filter(observations)[0]

# draw estimates
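# (a minimal sketch of the drawing step, assuming matplotlib; the original
# plotting code was not included in the snippet)
import matplotlib.pyplot as plt
plt.plot(states[:, 0], label='true state')
plt.plot(ukf_state_estimates[:, 0], label='UKF')
plt.plot(akf_state_estimates[:, 0], label='AUKF')
plt.legend()
plt.show()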
Example #12
    def test_unscented_kalman(self):
        ukf = UnscentedKalmanFilter(lambda x, w: x + np.sin(w),
                                    lambda x, v: x + v,
                                    transition_covariance=0.1)
        (filtered_state_means, filtered_state_covariances) = ukf.filter([0, 1, 2])
        (smoothed_state_means, smoothed_state_covariances) = ukf.smooth([0, 1, 2])
        return filtered_state_means
Example #13
def main(argv):
    config = read_parser(argv, Inputs, InputsOpt_Defaults)

    if config['mode'] == 'fuse_kalman':

        print('Select MASTER Features xls')
        root = Tk()
        root.withdraw()
        root.update()
        filepath = filedialog.askopenfilename()
        root.destroy()

        mydict = pd.read_excel(filepath)
        rownames = list(mydict.index.values)
        length_data = len(rownames)

        mydict = mydict.to_dict(orient='list')

        newdict = {}
        for key, values in mydict.items():
            newdict[key] = movil_avg(mydict[key], config['n_mov_avg'])

        Features = []
        for i in range(length_data):
            example = []
            for feature in config['feature_array']:

                example.append(newdict[feature][i])
            Features.append(example)

        Features = np.array(Features)

        plt.plot(Features)
        plt.show()
        scaler_model = StandardScaler()
        scaler_model.fit(Features)
        Features = scaler_model.transform(Features)

        plt.plot(Features)
        plt.show()

        kf = UnscentedKalmanFilter(n_dim_state=1,
                                   n_dim_obs=len(config['feature_array']),
                                   random_state=1)
        # kf = AdditiveUnscentedKalmanFilter(n_dim_state=1, n_dim_obs=6, random_state=1)
        # kf = KalmanFilter(n_dim_state=1, n_dim_obs=len(config['feature_array']), random_state=1)
        measurements = [i for i in Features]

        # kf.em(measurements)

        z = kf.smooth(measurements)
        z = z[0]

        plt.plot(z)
        plt.show()

    elif config['mode'] == 'testtt':
        if config['mypath'] is None:
            print('Select XLS Files with Feature')
            root = Tk()
            root.withdraw()
            root.update()
            Filepaths = filedialog.askopenfilenames()
            root.destroy()
        else:
            # Filepath = config['mypath']
            Filepaths = [
                join(config['mypath'], f) for f in listdir(config['mypath'])
                if isfile(join(config['mypath'], f)) if f[-4:] == 'xlsx'
            ]

        Feature = []
        for filepath in Filepaths:
            mydict = pd.read_excel(filepath)
            mydict = mydict.to_dict(orient='list')
            Feature += mydict[config['feature']]
        Feature = np.array(Feature)

        # replace each NaN with the next non-NaN value in the series
        for i in range(len(Feature)):
            count = 0
            while np.isnan(Feature[i]):
                count += 1
                Feature[i] = Feature[i + count]
        # Feature = np.nan_to_num(Feature)

        Feature = movil_avg(Feature, config['n_mov_avg'])

        plt.plot(Feature)
        plt.show()

    elif config['mode'] == 'fuse_factor':

        print('Select MASTER Features xls')
        root = Tk()
        root.withdraw()
        root.update()
        filepath = filedialog.askopenfilename()
        root.destroy()

        mydict = pd.read_excel(filepath)
        rownames = list(mydict.index.values)
        length_data = len(rownames)

        mydict = mydict.to_dict(orient='list')

        newdict = {}
        for key, values in mydict.items():
            newdict[key] = movil_avg(mydict[key], config['n_mov_avg'])

        Features = []
        for i in range(length_data):
            example = []
            for feature in config['feature_array']:

                example.append(newdict[feature][i])
            Features.append(example)

        Features = np.array(Features)

        plt.plot(Features)
        plt.show()
        scaler_model = StandardScaler()
        scaler_model.fit(Features)
        Features = scaler_model.transform(Features)
        plt.plot(Features)
        plt.show()

        fa = FactorAnalysis(n_components=1, random_state=1)
        fa.fit(Features)

        z = fa.transform(Features)

        plt.plot(z)
        plt.show()

    elif config['mode'] == 'fuse_pca':

        print('Select MASTER Features xls')
        root = Tk()
        root.withdraw()
        root.update()
        filepath = filedialog.askopenfilename()
        root.destroy()

        mydict = pd.read_excel(filepath)
        rownames = list(mydict.index.values)
        length_data = len(rownames)

        mydict = mydict.to_dict(orient='list')

        newdict = {}
        for key, values in mydict.items():
            newdict[key] = movil_avg(mydict[key], config['n_mov_avg'])

        Features = []
        for i in range(length_data):
            example = []
            for feature in config['feature_array']:

                example.append(newdict[feature][i])
            Features.append(example)

        Features = np.array(Features)

        plt.plot(Features)
        plt.show()

        scaler_model = StandardScaler()
        scaler_model.fit(Features)
        Features = scaler_model.transform(Features)

        plt.plot(Features)
        plt.show()

        from sklearn.cluster import FeatureAgglomeration
        from sklearn.decomposition import FastICA
        from sklearn.decomposition import KernelPCA

        # model = PCA(n_components=len(config['feature_array']))
        model = PCA(n_components=2)
        # model = FeatureAgglomeration(n_clusters=1)
        # model = FastICA(n_components=1)
        # model = KernelPCA(n_components=1, kernel='rbf')
        # model = KernelPCA(n_components=len(config['feature_array']), kernel='rbf')

        model.fit(Features)
        Features = model.transform(Features)

        print(model.explained_variance_)
        print(model.explained_variance_ratio_)

        plt.plot(Features)
        plt.show()

        # TFeatures = np.transpose(Features)
        # plt.plot(TFeatures[0])
        # plt.show()

        # mydict_out = {}
        # mydict_out['FuFFT_ma1'] = TFeatures[0]

        # writer = pd.ExcelWriter('MASTER_Features.xlsx')
        # DataFr = pd.DataFrame(data=mydict_out, index=rownames)
        # DataFr.to_excel(writer, sheet_name='AE_Features')
        # writer.close()

    elif config['mode'] == 'fuse_corr':
        print('Select MASTER Features xls')
        root = Tk()
        root.withdraw()
        root.update()
        filepath = filedialog.askopenfilename()
        root.destroy()

        mydict = pd.read_excel(filepath)
        rownames = list(mydict.index.values)
        length_data = len(rownames)

        mydict = mydict.to_dict(orient='list')

        Features = {}
        for key, values in mydict.items():
            Features[key] = movil_avg(mydict[key], config['n_mov_avg'])

        CorrCoefs = {}
        for feature1 in config['feature_array']:
            mylist = []
            for feature2 in config['feature_array']:
                value = np.corrcoef(Features[feature1],
                                    Features[feature2])[0][1]
                mylist.append(value)
            CorrCoefs[feature1] = mylist

        writer = pd.ExcelWriter('Corr_Features.xlsx')
        DataFr = pd.DataFrame(data=CorrCoefs, index=config['feature_array'])
        DataFr.to_excel(writer, sheet_name='Corr_Features')
        writer.close()

    elif config['mode'] == 'test_cluster':

        print('Select MASTER Features xls')
        root = Tk()
        root.withdraw()
        root.update()
        filepath = filedialog.askopenfilename()
        root.destroy()

        mydict = pd.read_excel(filepath)
        rownames = list(mydict.index.values)
        length_data = len(rownames)

        mydict = mydict.to_dict(orient='list')

        newdict = {}
        for key, values in mydict.items():
            newdict[key] = movil_avg(mydict[key], config['n_mov_avg'])

        Features = []
        for i in range(length_data):
            example = []
            for feature in config['feature_array']:

                example.append(newdict[feature][i])
            Features.append(example)

        Features = np.array(Features)

        from sklearn.cluster import DBSCAN, AffinityPropagation

        model = AffinityPropagation()
        print(model.fit_predict(Features))

    elif config['mode'] == 'fuse_mlp':

        print('Select MASTER Features xls')
        root = Tk()
        root.withdraw()
        root.update()
        filepath = filedialog.askopenfilename()
        root.destroy()

        mydict = pd.read_excel(filepath)
        rownames = list(mydict.index.values)
        length_data = len(rownames)

        mydict = mydict.to_dict(orient='list')

        newdict = {}
        for key, values in mydict.items():
            newdict[key] = movil_avg(mydict[key], config['n_mov_avg'])

        Features = []
        for i in range(length_data):
            example = []
            for feature in config['feature_array']:

                example.append(newdict[feature][i])
            Features.append(example)

        Features = np.array(Features)

        plt.plot(Features)
        plt.show()

        scaler_model = StandardScaler()
        scaler_model.fit(Features)
        Features = scaler_model.transform(Features)

        plt.plot(Features)
        plt.show()

        nn = MLPRegressor(hidden_layer_sizes=(5,),
                          activation='tanh',
                          solver='lbfgs',
                          alpha=1.e1)
        nn.fit(X=Features, y=np.linspace(0, 1, length_data))

        z = nn.predict(Features)

        # Features = pca.transform(Features)

        # corr = []
        # TFeatures = np.transpose(Features)
        # for feature_pca in TFeatures:
        # corr.append(np.corrcoef(np.ravel(feature_pca), np.arange(len(feature_pca)))[0][1])

        # z = TFeatures[np.argmax(np.absolute(corr))]
        # print(corr)
        plt.plot(z)
        plt.show()

    elif config['mode'] == 'predict_from_xls_mlp':
        print('Select xls')
        root = Tk()
        root.withdraw()
        root.update()
        Filepaths = filedialog.askopenfilenames()
        root.destroy()

        Feature = []
        for filepath in Filepaths:
            mydict = pd.read_excel(filepath, sheet_name=config['sheet'])

            mydict = mydict.to_dict(orient='list')
            # Feature += mydict[config['feature']][:-2]
            Feature += mydict[config['feature']]
        Feature = list(np.nan_to_num(Feature))
        Feature = movil_avg(Feature, config['n_mov_avg'])

        Feature = np.array(Feature)
        # fig, ax = plt.subplots(nrows=2, ncols=1, sharex=True, sharey=True)
        # ax[0].plot(Feature, 'r')
        Feature = median_filter(data=Feature, points=5, same_length=True)
        # ax[1].plot(Feature, 'b')
        # plt.show()
        x_Feature = np.arange(len(Feature))

        Train = Feature[0:int(config['train'] * len(Feature))]
        x_Train = np.arange(float(len(Train)))

        x_Predict = np.linspace(len(Train),
                                len(Feature),
                                num=len(Feature) - len(Train),
                                endpoint=False)

        # scaler = StandardScaler()
        # scaler = RobustScaler()
        # scaler.fit(Train)
        # Train = scaler.transform(Train)

        clf = MLPRegressor(solver=config['solver'],
                           alpha=config['alpha'],
                           hidden_layer_sizes=config['layers'],
                           random_state=config['rs'],
                           activation=config['activation'],
                           tol=config['tol'],
                           verbose=True,
                           max_iter=config['max_iter'])

        # from sklearn.tree import DecisionTreeRegressor
        # clf = DecisionTreeRegressor()

        n_pre = int(config['n_pre'] * len(Train))
        m_post = int(config['m_post'] * len(Train))
        n_ex = len(Train) - n_pre - m_post
        print('+++++++++++++Info: Input points n = ', n_pre)
        print('+++++++++++++Info: Output points m = ', m_post)
        print('+++++++++++++Info: Training examples = ', n_ex)
        a = input('enter to continue...')
        T_Inputs = []
        T_Outputs = []
        # plt.plot(Train, 'k')
        # plt.show()
        for k in range(n_ex + 1):

            T_Inputs.append(Train[k:k + n_pre])
            T_Outputs.append(Train[k + n_pre:k + n_pre + m_post])

            # aa = np.arange(len(Train[k : k + n_pre]))
            # plt.plot(aa, Train[k : k + n_pre], 'b')
            # bb = np.max(aa) + np.arange(len(Train[k + n_pre : k + n_pre + m_post]))
            # plt.plot(bb, Train[k + n_pre : k + n_pre + m_post], 'r')
            # plt.show()
        # sys.exit()

        from sklearn.model_selection import KFold, GroupKFold
        kf = KFold(n_splits=100, shuffle=True)

        # X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16], [17, 19]])
        # y = np.array([[3], [5], [7], [9], [11], [13], [15], [17], [19]])
        # # X = T_Inputs
        # # y = T_Outputs
        # for i in range(3):
        # for train_index, test_index in kf.split(X):
        # print("TRAIN:", train_index, "TEST:", test_index)
        # # X_train, X_test = X[train_index], X[test_index]
        # # y_train, y_test = y[train_index], y[test_index]
        # sys.exit()
        T_Inputs = np.array(T_Inputs)
        T_Outputs = np.array(T_Outputs)
        count = 0
        epochs = 10
        for i in range(epochs):
            print(
                '+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++Epoch',
                count)
            numb = 0
            # each KFold "test" fold serves as a mini-batch for partial_fit
            for train_index, test_index in kf.split(T_Inputs):
                print('+++++++++++Batch', numb)
                T_Inputs_train, T_Inputs_test = T_Inputs[
                    train_index], T_Inputs[test_index]
                T_Outputs_train, T_Outputs_test = T_Outputs[
                    train_index], T_Outputs[test_index]
                clf.partial_fit(T_Inputs_test, T_Outputs_test)
                numb += 1
            count += 1

        # clf.fit(T_Inputs, T_Outputs)
        print(
            '+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n'
        )
        Predict = []
        It_Train = list(Train)

        for k in range(len(x_Predict) + m_post - 1):
            P_Input = It_Train[n_ex + k + 1:n_ex + n_pre + k + 1]

            P_Output = clf.predict([P_Input])
            P_Output = P_Output[0]

            Predict.append(P_Output[-1])
            It_Train.append(P_Output[-1])

        Predict = Predict[:-(m_post - 1)]

        fig, ax = plt.subplots()
        ax.set_xlabel('Accumulated Operating Hours', fontsize=13)
        ax.set_ylabel('Health Index', fontsize=13)
        ax.set_title('Linear Regression', fontsize=13)
        fact = 5. / 3600.
        ax.plot(x_Feature * fact, Feature, 'b', label='Real')
        ax.plot(x_Predict * fact, Predict, 'r', label='Prediction')
        ax.plot(x_Train * fact, Train, 'k', label='Training')
        ax.legend()
        plt.show()

        plt.plot(x_Feature, Feature, 'b', x_Predict, Predict, 'r', x_Train,
                 Train, 'k')
        plt.show()

    elif config['mode'] == 'predict_from_xls_svr':
        print('Select xls')
        root = Tk()
        root.withdraw()
        root.update()
        Filepaths = filedialog.askopenfilenames()
        root.destroy()

        Feature = []
        for filepath in Filepaths:
            mydict = pd.read_excel(filepath, sheet_name=config['sheet'])

            mydict = mydict.to_dict(orient='list')
            Feature += mydict[config['feature']]
        Feature = list(np.nan_to_num(Feature))
        Feature = movil_avg(Feature, config['n_mov_avg'])

        Feature = np.array(Feature)
        x_Feature = np.arange(len(Feature))

        Train = Feature[0:int(config['train'] * len(Feature))]
        x_Train = np.arange(float(len(Train)))

        x_Predict = np.linspace(len(Train),
                                len(Feature),
                                num=len(Feature) - len(Train),
                                endpoint=False)

        # scaler = StandardScaler()
        # scaler = RobustScaler()
        # scaler.fit(Train)
        # Train = scaler.transform(Train)

        # clf = NuSVR(nu=0.5, C=1.0, kernel='poly')
        clf = SVR(kernel='poly', verbose=True, degree=6, C=0.5)

        n_pre = int(config['n_pre'] * len(Train))
        m_post = 1
        # m_post = int(config['m_post']*len(Train))
        n_ex = len(Train) - n_pre - m_post

        print('+++++++++++++Info: Input points n = ', n_pre)
        print('+++++++++++++Info: Output points m = ', m_post)
        print('+++++++++++++Info: Training examples = ', n_ex)
        a = input('enter to continue...')
        T_Inputs = []
        T_Outputs = []
        for k in range(n_ex + 1):

            T_Inputs.append(Train[k:k + n_pre])
            T_Outputs.append(Train[k + n_pre:k + n_pre + m_post])

        clf.fit(T_Inputs, np.ravel(T_Outputs))  # SVR expects a 1-D target
        print(
            '+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n'
        )
        Predict = []
        It_Train = list(Train)

        for k in range(len(x_Predict) + m_post - 1):
            P_Input = It_Train[n_ex + k + 1:n_ex + n_pre + k + 1]

            P_Output = clf.predict([P_Input])
            P_Output = P_Output[0]

            Predict.append(P_Output)
            It_Train.append(P_Output)

        # Predict = Predict[:-(m_post-1)]

        fig, ax = plt.subplots()
        ax.set_xlabel('Accumulated Operating Hours', fontsize=13)
        ax.set_ylabel('Health Index', fontsize=13)
        ax.set_title('Linear Regression', fontsize=13)
        fact = 5. / 3600.
        ax.plot(x_Feature * fact, Feature, 'b', label='Real')
        ax.plot(x_Predict * fact, Predict, 'r', label='Prediction')
        ax.plot(x_Train * fact, Train, 'k', label='Training')
        ax.legend()
        plt.show()

        # plt.plot(x_Feature, Feature, 'b', x_Predict, Predict, 'r', x_Train, Train, 'k')
        # plt.show()

    elif config['mode'] == 'predict_from_xls_lin':
        print('Select xls')
        root = Tk()
        root.withdraw()
        root.update()
        Filepaths = filedialog.askopenfilenames()
        root.destroy()

        Feature = []
        for filepath in Filepaths:
            mydict = pd.read_excel(filepath, sheet_name=config['sheet'])

            mydict = mydict.to_dict(orient='list')
            Feature += mydict[config['feature']]
        Feature = list(np.nan_to_num(Feature))
        Feature = movil_avg(Feature, config['n_mov_avg'])

        # Feature = median_filter(data=Feature, points=5, same_length=True)

        Feature = np.array(Feature)
        x_Feature = np.arange(len(Feature))

        Train = Feature[0:int(config['train'] * len(Feature))]
        x_Train = np.arange(float(len(Train)))

        slope, intercept, r_value, p_value, std_err = stats.linregress(
            x_Train, Train)

        x_Predict = np.linspace(len(Train),
                                len(Feature),
                                num=len(Feature) - len(Train),
                                endpoint=False)

        Predict = slope * x_Predict + intercept

        fig, ax = plt.subplots()
        ax.set_xlabel('Accumulated Operating Hours', fontsize=13)
        ax.set_ylabel('Health Index', fontsize=13)
        # ax.set_title('Linear Regression', fontsize=13)
        ax.set_title('Neural Network', fontsize=13)
        fact = 5. / 3600.
        ax.plot(x_Feature * fact, Feature, 'b', label='Real')
        ax.plot(x_Predict * fact, Predict, 'r', label='Prediction')
        ax.plot(x_Train * fact, Train, 'k', label='Training')
        ax.legend()
        plt.show()

        print(len(Feature))
    else:
        print('unknown mode')
        sys.exit()

    return
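# Typical dispatch for a script like this (assumed; the module-level code was
# not included in the snippet):
if __name__ == '__main__':
    main(sys.argv)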
Example #14
from pykalman import UnscentedKalmanFilter
import numpy as np

ukf = UnscentedKalmanFilter(lambda x, w: x + np.sin(w),
                            lambda x, v: x + v,
                            observation_covariance=0.1)

(filtered_state_means, filtered_state_covariance) = ukf.filter([0, 0, 0])
(smoothed_state_means, smoothed_state_covariance) = ukf.smooth([0, 0, 0])


def smothingData(args):
    delta = ukf.smooth(args)[0]
    return delta


def filteringData(args):
    alpha = ukf.filter(args)
    return alpha
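# Hypothetical calls of the two helpers on a short series:
print(smothingData([0.0, 0.5, 1.0]).ravel())      # smoothed state means
print(filteringData([0.0, 0.5, 1.0])[0].ravel())  # filtered state means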
Example #15
    def filtering(timeseries=None, filterType='highPassRealTime'):
        '''
        filterType can be 
            highPassRealTime 
            highPassBetweenRuns 
            UnscentedKalmanFilter_filter # documentation: https://pykalman.github.io/
            UnscentedKalmanFilter_smooth
            KalmanFilter_filter
            KalmanFilter_smooth
            noFilter
        '''

        timeseries = timeseries.astype(float)  # np.float was removed in NumPy 1.24
        oldShape = timeseries.shape
        timeseries = timeseries.reshape(timeseries.shape[0], -1)
        if filterType == 'highPassRealTime':
            # from highpassFunc import highPassRealTime, highPassBetweenRuns
            from highpass import highpass

            def highPassRealTime(A_matrix, TR, cutoff):
                full_matrix = np.transpose(
                    highpass(np.transpose(A_matrix), cutoff / (2 * TR), True))
                return full_matrix[-1, :]

            filtered_timeseries = []
            for currTR in range(timeseries.shape[0]):
                filtered_timeseries.append(
                    highPassRealTime(timeseries[:(currTR + 1)], 1.5, 56))
            filtered_timeseries = np.asarray(filtered_timeseries)
        elif filterType == 'highPassBetweenRuns':
            # from highpassFunc import highPassRealTime, highPassBetweenRuns
            from highpass import highpass

            def highPassBetweenRuns(A_matrix, TR, cutoff):
                return np.transpose(
                    highpass(np.transpose(A_matrix), cutoff / (2 * TR), False))

            filtered_timeseries = highPassBetweenRuns(timeseries, 1.5, 56)
        elif filterType == 'UnscentedKalmanFilter_filter':
            from pykalman import UnscentedKalmanFilter
            ukf = UnscentedKalmanFilter(lambda x, w: x + np.sin(w),
                                        lambda x, v: x + v,
                                        observation_covariance=0.1)
            filtered_timeseries = np.zeros(timeseries.shape)
            for curr_voxel in range(timeseries.shape[1]):
                (filtered_timeseries_state_means,
                 filtered_timeseries_state_covariances) = ukf.filter(
                     timeseries[:, curr_voxel])
                filtered_timeseries[:,
                                    curr_voxel] = filtered_timeseries_state_means.reshape(
                                        -1)
        elif filterType == 'UnscentedKalmanFilter_smooth':
            from pykalman import UnscentedKalmanFilter
            ukf = UnscentedKalmanFilter(lambda x, w: x + np.sin(w),
                                        lambda x, v: x + v,
                                        observation_covariance=0.1)
            filtered_timeseries = np.zeros(timeseries.shape)
            for curr_voxel in range(timeseries.shape[1]):
                (smoothed_state_means,
                 smoothed_state_covariances) = ukf.smooth(
                     timeseries[:, curr_voxel])
                filtered_timeseries[:,
                                    curr_voxel] = smoothed_state_means.reshape(
                                        -1)
        elif filterType == 'KalmanFilter_filter':
            from pykalman import KalmanFilter
            kf = KalmanFilter(transition_matrices=None,
                              observation_matrices=None)
            filtered_timeseries = np.zeros(timeseries.shape)
            for curr_voxel in range(timeseries.shape[1]):
                measurements = np.asarray(timeseries[:, curr_voxel])
                kf = kf.em(measurements, n_iter=5)
                (filtered_state_means,
                 filtered_state_covariances) = kf.filter(measurements)
                filtered_timeseries[:,
                                    curr_voxel] = filtered_state_means.reshape(
                                        -1)
        elif filterType == 'KalmanFilter_smooth':
            from pykalman import KalmanFilter
            kf = KalmanFilter(transition_matrices=None,
                              observation_matrices=None)
            filtered_timeseries = np.zeros(timeseries.shape)
            for curr_voxel in range(timeseries.shape[1]):
                measurements = np.asarray(timeseries[:, curr_voxel])
                kf = kf.em(measurements, n_iter=5)
                (smoothed_state_means,
                 smoothed_state_covariances) = kf.smooth(measurements)
                filtered_timeseries[:,
                                    curr_voxel] = smoothed_state_means.reshape(
                                        -1)
        elif filterType == 'noFilter':
            filtered_timeseries = timeseries
        else:
            raise ValueError('unknown filterType: ' + filterType)

        filtered_timeseries = filtered_timeseries.reshape(oldShape)
        return filtered_timeseries
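    # A minimal sketch of calling filtering() on synthetic data (shapes made
    # up for illustration):
    #
    #     ts = np.random.randn(20, 4)      # 20 timepoints x 4 voxels
    #     out = filtering(timeseries=ts, filterType='noFilter')
    #     assert out.shape == ts.shape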
Example #16
def observation_function(state, noise):
    # the start of this example was truncated in the source; the first
    # observed component is assumed to be state[0] plus its noise term
    xm = state[0] + noise[0]
    ym = np.sin(state[0]) + noise[1]
    return np.array([xm, ym])


'''----INITIALIZE THE PARAMETERS----'''
transition_covariance = np.eye(2)
noise_generator = np.random.RandomState(0)
observation_covariance = np.eye(2) + noise_generator.randn(2, 2) * 0.1
Initial_state = [0, 0]
initial_covariance = [[1, 0.1], [-0.1, 1]]

# UKF
kf = UnscentedKalmanFilter(transition_function,
                           observation_function,
                           transition_covariance,
                           observation_covariance,
                           Initial_state,
                           initial_covariance,
                           random_state=noise_generator)

sample = 200
states, observations = kf.sample(sample, Initial_state)
# estimate state with filtering and smoothing
filtered_state_estimates = kf.filter(observations)[0]
smoothed_state_estimates = kf.smooth(observations)[0]

# True line
t = np.linspace(0, sample * 0.1, sample)
y = np.sin(t)

# the original snippet was cut off mid-call; a minimal completion plotting the
# filtered estimate against the true sine:
plt.plot(filtered_state_estimates[:, 0], label='filtered')
plt.plot(t, y, label='true')
plt.legend()
plt.show()
Example #17
import argparse

import numpy as np
import pandas as pd
from matplotlib import cm, pyplot as plt
from pykalman import UnscentedKalmanFilter
from hmmlearn.hmm import GaussianHMM

ukf = UnscentedKalmanFilter(initial_state_mean=0, n_dim_obs=1)


def smooth(df, colname):
    # ravel the (n, 1) array of smoothed means into a 1-D column
    df[colname + '_smoothed'] = ukf.smooth(df[colname].values)[0].ravel()
    return df

def parse_fmftime(namestring):
    fn = namestring.split('/')[-1]
    exp_id, CAMN, DATE, TIME = fn.split('_', 3)
    FLY_ID = exp_id + '_' + DATE + '_' + TIME
    fmftime = pd.to_datetime(DATE + TIME)
    return FLY_ID, fmftime, exp_id
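# Hypothetical usage (a filename following the exp_CAMN_DATE_TIME pattern the
# parser expects):
#
#     fly_id, fmftime, exp_id = parse_fmftime('/data/DB072_cam1_20130101_120000')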
    

treatments = ['00','11','15','65']
genotypes = ['DB072','DB185','DB213']

if __name__ == "__main__":

    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--flymad_dir', type=str, required=True,
                            help='directory of flymad files')  
    # note: type=list splits the argument string into individual characters;
    # nargs='+' is usually what is intended for a list of parameters
    parser.add_argument('--parameters', type=list, required=True,
                            help='parameters to define HMM')