# ============ コード例 #1 — Code example #1 ============
import numpy as np
import pandas as pd
from keras import regularizers
from keras.layers import Input, Dense
from keras.models import Model
from scipy import stats
from scipy.stats.stats import pearsonr
from sklearn.metrics import mean_squared_error

## Shallow autoencoder with L2 activity regularization on the code layer.
# Load the zero-padded raw sensor matrix and the companion Y/Z file; only the
# last column of the Y/Z file (the target) is kept.
rawsnX = np.array(pd.read_csv('_dataset2_X_zpad.csv', header=None))

rawsnY = np.array(pd.read_csv('_dataset2_YnZ.csv', header=None))
rawsnY = rawsnY[:, -1]

# Scale the inputs, then train input -> 2000-unit code -> reconstruction,
# with a small L2 penalty on the code activations.
X = subfn.scaler(rawsnX)
N_dim = X.shape[1]
encoding_dim = 2000
input_dim = Input(shape=(N_dim, ))
code_layer = Dense(encoding_dim,
                   activation='tanh',
                   activity_regularizer=regularizers.l2(10e-9))(input_dim)
reconstruction = Dense(N_dim, activation='tanh')(code_layer)
ae = Model(input_dim, reconstruction)
ae.compile(loss='mean_squared_error', optimizer='sgd')
ae.fit(X, X, batch_size=16, epochs=100)

# Keep only the encoder half, embed the data, and persist the learned codes.
encoder = Model(input_dim, code_layer)
Z = encoder.predict(X)

np.savetxt('L2_SAERSD_data.csv', Z, delimiter=',')
# ============ コード例 #2 — Code example #2 ============
import scipy
from scipy import stats

from scipy.stats.stats import pearsonr

from keras.layers import Input, Dense
from keras.models import Model
from sklearn.metrics import mean_squared_error

import subfn
Disp_mode = 1

# Load the process-statistics recipe dataset (rawX: features, rawY: targets).
rawX, rawY = subfn.dataset2_stats_recipe(
    disp_mode = Disp_mode)

# Scale features into the range expected by tanh units.
X = subfn.scaler(rawX)
N_dim = X.shape[1]

## Learning new features by Deep Autoencoder from process statistics dataset.
# FIX: the input/output widths were hard-coded to 44370 even though N_dim is
# computed above; use N_dim so the network always matches the loaded data.
input_dim = Input(shape=(N_dim,))
encoder1 = Dense(4000, activation='tanh')(input_dim)
encoder2 = Dense(3000, activation='tanh')(encoder1)
encoder3 = Dense(2000, activation='tanh')(encoder2)  # bottleneck code layer
decoder1 = Dense(3000, activation='tanh')(encoder3)
decoder2 = Dense(4000, activation='tanh')(decoder1)
decoder3 = Dense(N_dim, activation='tanh')(decoder2)
dae = Model(input_dim, decoder3)
dae.compile(loss='mean_squared_error', optimizer='sgd')
dae.fit(X, X, batch_size=16, epochs=100)

# Encoder-only model: maps inputs to the 2000-dim learned features.
encoder = Model(input_dim, encoder3)
# ============ コード例 #3 — Code example #3 ============
# File: Predictions.py — Project: hbk008/MS-Thesis-New
# raw(Y) 14 CDs: from 1 to 14
# raw(Z) a yield rate: 0
# 1-based index of the output column chosen as the prediction target.
List_output = [1]
# FIX: was a Python 2 print statement (SyntaxError on Python 3); the
# print() call below produces the same space-separated output.
if Disp_mode > 0: print('Chosen output', List_output, ' th')
'''
# NOTE(review): loading the raw sensor matrix is disabled here; the features
# used below come from a pre-computed autoencoder-embedding CSV instead.
#rawsnX = np.array(pd.read_csv('_dataset2_X_zpad.csv', header=None))

# Target vector: keep only the last column of the Y/Z file (per the comments
# above, presumably the yield rate — confirm against the dataset description).
rawsnY = np.array(pd.read_csv('_dataset2_YnZ.csv', header=None))
rawsnY = rawsnY[:, -1]

## Replace the dataset in the below line everytime you run the code with a
## different dataset. Currently the dataset is for features learnt by a basic
## one-hidden-layer autoencoder on the raw sensor dataset.
Z_AERSD = np.array(pd.read_csv('AERSD_data.csv', header=None))

# Preprocessing ----------------------------------------------------------------
# Scale data with range of [-1, 1]
X = subfn.scaler(Z_AERSD)
Y = subfn.scaler(rawsnY)

N__obs = X.shape[0]  # total no. observations in the dataset
N__dim = X.shape[1]  # total no. dimensions in the dataset

#N__SN = 85 - len(Exclusion_SN) # no. of sensors chosen
#N__dim_SN = N__dim/N__SN # no. of time stamps of each sensor

################################################################################
# iterations with different random seeds (different divisions for tr & te sets)
# Result accumulators, one per model, filled across the random splits below.
# NOTE(review): names suggest DNN / multiple linear regression / lasso /
# ridge / SVR — confirm against the loop body that fills them (not shown here).
res_dnn = []
res_mlr = []
res_las = []
res_rid = []
res_svr = []