Beispiel #1
0
def no_failure_data(data, n, failure_date):
    """Build a test set (features + targets) from assumed no-fault SCADA data.

    Parameters
    ----------
    data : pandas.DataFrame
        Raw turbine data with at least the columns Time, StationID,
        MainBearingGTemp, MainBearingHTemp, GearOilTemp, AmbientTemp,
        NacelleTemp, ActivePower, GenRPM.
    n : int
        Sliding-window length passed to ``Data_processing.Sliding_window``.
    failure_date : scalar
        Parsed to a datetime but otherwise unused in this function
        (kept for interface compatibility — TODO confirm whether it
        should gate the data range).

    Returns
    -------
    (X_test, Y_test) : tuple of numpy.ndarray
        X_test: windowed feature matrix with a trailing bias column of ones.
        Y_test: rows of [MainBearingHTemp, labels] aligned with the windows
        (first ``n`` rows dropped to match the window offset).
    """
    failure_date = pd.to_datetime(failure_date)  # parsed but never used below

    # Drop any row with a missing value in a required column
    # (equivalent to the chained ~pd.isnull filters, one column at a time).
    required = ["StationID", "MainBearingGTemp", "MainBearingHTemp",
                "GearOilTemp", "AmbientTemp", "NacelleTemp",
                "ActivePower", "GenRPM"]
    data = data.dropna(subset=required)

    ### Filters!
    # Keep only rows at meaningful production levels.
    data = data.drop(data[data.ActivePower < 1000].index)
    data = data.reset_index()

    ##### Normalize the data.
    # Temperatures: z-score (population std, ddof=0, as np.std defaults).
    for col in ["GearOilTemp", "AmbientTemp", "NacelleTemp"]:
        data[col] = (data[col] - np.mean(data[col])) / np.std(data[col])
    # Power / RPM: min-max scaled to roughly [-1, 1].
    for col in ["ActivePower", "GenRPM"]:
        mid = 0.5 * (data[col].max(skipna=True) + data[col].min(skipna=True))
        half = 0.5 * (data[col].max(skipna=True) - data[col].min(skipna=True))
        data[col] = (data[col] - mid) / half

    #### Data is extracted from assumed no-fault states; the first two
    #### months are deleted to make up for commissioning and testing.

    # All rows are labelled 0 (no fault).
    data["labels"] = pd.Series(0, index=data.index)

    training_data = data
    start = training_data.Time[0] + timedelta(days=60)
    training_data = training_data.drop(
        training_data[training_data.Time < start].index)  # drop first months
    training_data = training_data.reset_index(drop=True)

    X_test = np.asarray(training_data[["GearOilTemp", "AmbientTemp",
                                       "NacelleTemp", "ActivePower", "GenRPM"]])
    Y_test = np.asarray(training_data[["MainBearingHTemp", "labels"]])

    ##### Establish sliding window; drop the first n targets so Y lines up
    ##### with the windowed X (same effect as np.delete of rows 0..n-1).
    X_test = Data_processing.Sliding_window(X_test, n)
    Y_test = Y_test[n:]

    # Append a bias column of ones to X.
    X_BIAS = np.ones((X_test.shape[0], X_test.shape[1] + 1))
    X_BIAS[:, :-1] = X_test
    X_test = X_BIAS

    return X_test, Y_test
Beispiel #2
0
                    delimiter=",",
                    decimal=",")
# Parse timestamps for the fourth dataset (the read_csv call is above).
data4.Time = pd.to_datetime(data4.Time)

# Build windowed test sets for each turbine; window length n, third
# argument (20) is the unused failure_date parameter of no_failure_data.
X_G01, Y_G01 = no_failure_data(data1, n, 20)
X_G02, Y_G02 = no_failure_data(data2, n, 20)
X_G03, Y_G03 = no_failure_data(data3, n, 20)
X_G04, Y_G04 = no_failure_data(data4, n, 20)

# Run each turbine's data through its trained model checkpoint.
# NOTE(review): SimpleNetwork presumably returns (predictions, residuals)
# restored from the named checkpoint — confirm against its definition.
sess = tf.Session()
pred_G01, Res_G01 = SimpleNetwork(X_G01, Y_G01, 'SimpleNetwork-40')
pred_G02, Res_G02 = SimpleNetwork(X_G02, Y_G02, 'SimpleNetwork-41')
pred_G03, Res_G03 = SimpleNetwork(X_G03, Y_G03, 'SimpleNetwork-42')
pred_G04, Res_G04 = SimpleNetwork(X_G04, Y_G04, 'SimpleNetwork-43')

# Smooth the residuals: sliding window of 5, then mean over the window
# axis, keeping a column vector shape via expand_dims.
Res_G01 = Data_processing.Sliding_window(Res_G01, 5)
Res_G01 = np.expand_dims(np.mean(Res_G01, 1), 1)
Res_G02 = Data_processing.Sliding_window(Res_G02, 5)
Res_G02 = np.expand_dims(np.mean(Res_G02, 1), 1)
Res_G03 = Data_processing.Sliding_window(Res_G03, 5)
Res_G03 = np.expand_dims(np.mean(Res_G03, 1), 1)
Res_G04 = Data_processing.Sliding_window(Res_G04, 5)
Res_G04 = np.expand_dims(np.mean(Res_G04, 1), 1)

# Pair each smoothed residual with its label (column 1 of Y), skipping the
# first 5 rows to stay aligned with the window offset above.
Results_G01 = np.append(Res_G01, np.expand_dims(Y_G01[5:, 1], 1), 1)
Results_G02 = np.append(Res_G02, np.expand_dims(Y_G02[5:, 1], 1), 1)
Results_G03 = np.append(Res_G03, np.expand_dims(Y_G03[5:, 1], 1), 1)
Results_G04 = np.append(Res_G04, np.expand_dims(Y_G04[5:, 1], 1), 1)

# Stack the per-turbine results into one no-fault result matrix.
# NOTE(review): Results_G04 is never appended here — confirm whether that
# is intentional or an omission.
Results_nofault = np.append(Results_G01, Results_G02, 0)
Results_nofault = np.append(Results_nofault, Results_G03, 0)
Beispiel #3
0
n = 24  # sliding window length


# Load the four no-fault turbine datasets. Files use ',' both as field
# delimiter and decimal separator; the header row is skipped and replaced
# by COLUMNS.
COLUMNS = ["Time", "StationID", "MainBearingGTemp", "MainBearingHTemp", "GearOilTemp", "AmbientTemp",
               "NacelleTemp", "ActivePower", "GenRPM"]
data1 = pd.read_csv("No_fault_data/A03.csv", names=COLUMNS, skiprows=1, delimiter=",", decimal=",")
data1.Time = pd.to_datetime(data1.Time)
data2 = pd.read_csv("No_fault_data/A04.csv", names=COLUMNS, skiprows=1, delimiter=",", decimal=",")
data2.Time = pd.to_datetime(data2.Time)
data3 = pd.read_csv("No_fault_data/A05.csv", names=COLUMNS, skiprows=1, delimiter=",", decimal=",")
data3.Time = pd.to_datetime(data3.Time)
data4 = pd.read_csv("No_fault_data/B03.csv", names=COLUMNS, skiprows=1, delimiter=",", decimal=",")
data4.Time = pd.to_datetime(data4.Time)


# Build training splits with window length 24; only the first two of the
# six returned values are used here.
X_A03, Y_A03 , _, _, _, _ = Data_processing.define_data(data1, 24)
X_A04, Y_A04 , _, _, _, _ = Data_processing.define_data(data2, 24)
X_A05, Y_A05 , _, _, _, _ = Data_processing.define_data(data3, 24)
X_B03, Y_B03 , _, _, _, _ = Data_processing.define_data(data4, 24)

# Fit one linear model per turbine.
W_A03 = LinearModel(X_A03, Y_A03)
W_A04 = LinearModel(X_A04, Y_A04)
W_A05 = LinearModel(X_A05, Y_A05)
W_B03 = LinearModel(X_B03, Y_B03)

# Build matching test sets (third argument 20 is the unused failure_date
# parameter of no_failure_data).
X_A03_test, Y_A03_test = no_failure_data(data1, n, 20)
X_A04_test, Y_A04_test = no_failure_data(data2, n, 20)
X_A05_test, Y_A05_test = no_failure_data(data3, n, 20)
X_B03_test, Y_B03_test = no_failure_data(data4, n, 20)

sess = tf.Session()
Beispiel #4
0
from tensorflow.core.protobuf import saver_pb2

## Extract data from function:
## train one network per CSV file found in the no-fault data folder.
## NOTE(review): the loop body continues beyond this excerpt.

path = "No_fault_data"
Model_no = 40  # starting model number (presumably used to name checkpoints)
files = [f for f in os.listdir(path)]
for f in files:
    # Fresh graph and session per file so models don't share variables.
    tf.reset_default_graph()
    sess = tf.Session()
    COLUMNS = ["Time", "StationID", "MainBearingGTemp", "MainBearingHTemp", "GearOilTemp", "AmbientTemp",
               "NacelleTemp", "ActivePower", "GenRPM"]
    # CSV uses ',' as both delimiter and decimal separator; header replaced.
    data = pd.read_csv(path + "/" + f, names=COLUMNS, skiprows=1, delimiter=",", decimal=",")
    data.Time = pd.to_datetime(data.Time)

    # Window length 24; returns train/test/validation splits.
    X_train, Y_train, X_test, Y_test, X_val, Y_val = Data_processing.define_data(data, 24)


    # Hyperparameters (chosen by validation per the original note).
    activation_functions = tf.nn.sigmoid #best choice by validation, using LR = 0.001, training_epochs = 1000, batch_size = 1000
    LAMBDA = 0.0001  # L2 regularization strength
    # Parameters
    momentum = 0.9
    learning_rate = 0.001
    training_epochs = 1000
    display_step = 1000
    batch_size = 1000

    # Same regularization for hidden and output layers.
    LAMBDA_hid = LAMBDA
    LAMBDA_out = LAMBDA_hid

    # Network parameters