Example #1
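# Imports this snippet needs (assuming Keras 2.x; `risk_estimation` is a custom
# loss defined elsewhere in the original project):
from keras import initializers
from keras.layers import LSTM, Activation, BatchNormalization, Dense, Dropout, ReLU
from keras.models import Sequential, load_model
from keras.optimizers import RMSprop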
class WindPuller(object):
    def __init__(self, input_shape, lr=0.01, n_layers=2, n_hidden=8, rate_dropout=0.2, loss=risk_estimation):
        print("initializing..., learing rate %s, n_layers %s, n_hidden %s, dropout rate %s." % (
        lr, n_layers, n_hidden, rate_dropout))
        self.model = Sequential()
        self.model.add(Dropout(rate=rate_dropout, input_shape=(input_shape[0], input_shape[1])))
        for i in range(0, n_layers - 1):
            self.model.add(LSTM(n_hidden * 4, return_sequences=True, activation='tanh',
                                recurrent_activation='hard_sigmoid', kernel_initializer='glorot_uniform',
                                recurrent_initializer='orthogonal', bias_initializer='zeros',
                                dropout=rate_dropout, recurrent_dropout=rate_dropout))
        self.model.add(LSTM(n_hidden, return_sequences=False, activation='tanh',
                            recurrent_activation='hard_sigmoid', kernel_initializer='glorot_uniform',
                            recurrent_initializer='orthogonal', bias_initializer='zeros',
                            dropout=rate_dropout, recurrent_dropout=rate_dropout))
        self.model.add(Dense(1, kernel_initializer=initializers.glorot_uniform()))
        # self.model.add(BatchNormalization(axis=-1, moving_mean_initializer=Constant(value=0.5),
        #               moving_variance_initializer=Constant(value=0.25)))
        self.model.add(BatchNormalization(axis=-1))
        # clip the activation to [0, 1]; Activation("relu(alpha=0., max_value=1.0)")
        # is not a valid activation name, so use the ReLU layer instead
        self.model.add(ReLU(max_value=1.0))
        opt = RMSprop(lr=lr)
        self.model.compile(loss=loss,
                           optimizer=opt,
                           metrics=['accuracy'])

    def fit(self, x, y, batch_size=32, nb_epoch=100, verbose=1, callbacks=None,
            validation_split=0., validation_data=None, shuffle=True,
            class_weight=None, sample_weight=None, initial_epoch=0):
        self.model.fit(x, y, batch_size=batch_size, epochs=nb_epoch, verbose=verbose,
                       callbacks=callbacks, validation_split=validation_split,
                       validation_data=validation_data, shuffle=shuffle,
                       class_weight=class_weight, sample_weight=sample_weight,
                       initial_epoch=initial_epoch)

    def save(self, path):
        self.model.save(path)

    def load_model(self, path):
        self.model = load_model(path)
        return self

    def evaluate(self, x, y, batch_size=32, verbose=1,
                 sample_weight=None, **kwargs):
        return self.model.evaluate(x, y, batch_size=batch_size, verbose=verbose,
                                   sample_weight=sample_weight)

    def predict(self, x, batch_size=32, verbose=0):
        return self.model.predict(x, batch_size, verbose)
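For context, a minimal sketch of how the class might be driven; the shapes, the
random data, and the file name are illustrative assumptions (and the custom
risk_estimation loss must be importable for compile() to succeed):

import numpy as np

# hypothetical data: 256 samples of 30 timesteps x 16 features
x = np.random.random((256, 30, 16)).astype('float32')
y = np.random.random((256,)).astype('float32')

wp = WindPuller(input_shape=(30, 16), lr=0.01, n_layers=2, n_hidden=8)
wp.fit(x, y, batch_size=32, nb_epoch=5)
wp.save('windpuller.h5')  # illustrative path
preds = wp.predict(x[:10])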
Example #2
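x_train, y_train, max_words and num_classes come from earlier in the original
script; the snippet closely resembles the Keras Reuters-topic MLP example, so a
plausible reconstruction of the missing preparation (an assumption, not the
original code) is:

import numpy as np
from keras.datasets import reuters
from keras.layers import Activation, Dense
from keras.models import Sequential
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical

max_words = 1000
(x_train, y_train), (x_test, y_test) = reuters.load_data(num_words=max_words)
tokenizer = Tokenizer(num_words=max_words)
x_train = tokenizer.sequences_to_matrix(x_train, mode='binary')  # bag-of-words rows
x_test = tokenizer.sequences_to_matrix(x_test, mode='binary')
num_classes = np.max(y_train) + 1
y_train = to_categorical(y_train, num_classes)
y_test = to_categorical(y_test, num_classes)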
print('x_train shape :', x_train.shape)
print('x_test shape :', x_test.shape)
print('y_train shape :', y_train.shape)
print('y_test shape :', y_test.shape)

batch_size = 32
epochs = 5

model = Sequential()
model.add(Dense(512, input_shape=(max_words, )))
model.add(Activation('relu'))
model.add(Dense(num_classes))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

history = model.fit(x_train,
                    y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_split=0.1)

score = model.evaluate(x_test, y_test, batch_size=batch_size, verbose=1)
print('Test score:', score[0])
print('Test accuracy:', score[1])

Example #3
# (assumes: import numpy as np; from keras.utils import to_categorical)
def one_hot_encode_object_array(arr):
    uniques, ids = np.unique(
        arr, return_inverse=True)  # convert 3 words into 0, 1, 2
    return to_categorical(ids, len(uniques))  # convert 0, 1, 2 to one-hot


train_y_ohe = one_hot_encode_object_array(train_y)
test_y_ohe = one_hot_encode_object_array(test_y)
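A quick sanity check of the encoder (the label strings are made up for
illustration):

labels = np.array(['setosa', 'versicolor', 'setosa', 'virginica'])
print(one_hot_encode_object_array(labels))
# [[1. 0. 0.]
#  [0. 1. 0.]
#  [1. 0. 0.]
#  [0. 0. 1.]]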

model = Sequential()

model.add(Dense(16, input_shape=(4, )))  # each sample has 4 features
model.add(Activation('sigmoid'))  # add non-linearity to hidden layer 1

model.add(Dense(3))  # add another 3 neuron final layer
model.add(Activation('softmax'))  # give it non-linearity as output
model.summary()

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=["accuracy"])

model.fit(train_X,
          train_y_ohe,
          validation_split=0.2,
          epochs=10,
          batch_size=1,
          verbose=1)

loss, accuracy = model.evaluate(test_X, test_y_ohe, batch_size=32, verbose=1)
print("Accuracy = {:.2f}".format(accuracy))
Example #4
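# Note: these tensorflow.contrib.keras.python.keras paths are TF 1.x-era contrib
# packaging; on modern TensorFlow the equivalent modules live under tensorflow.keras.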
from tensorflow.contrib.keras.python.keras.models import Sequential
from tensorflow.contrib.keras.python.keras.layers import Dense, Dropout, Activation
from tensorflow.contrib.keras.python.keras.optimizers import SGD
from tensorflow.contrib.keras.python.keras.utils import to_categorical
import numpy as np

# Generate dummy data
x_train = np.random.random((1000, 20))
y_train = np.random.randint(2, size=(1000, 1))
x_test = np.random.random((100, 20))
y_test = np.random.randint(2, size=(100, 1))

model = Sequential()
model.add(Dense(64, input_dim=20, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy', # binary classification
              optimizer='rmsprop',
              metrics=['accuracy'])

hist = model.fit(x_train, y_train,
                 validation_split=0.2,
                 epochs=1,
                 batch_size=128)

hist.history  # dict of per-epoch loss/accuracy (echoed when run in a notebook)
score = model.evaluate(x_test, y_test, batch_size=128)
Example #5
# The model definition and the start of the compile call are truncated in the
# source snippet; only the closing line of model.compile(...) survived:
#               metrics=['accuracy'])
# (assumes: from keras.callbacks import CSVLogger, EarlyStopping, TensorBoard;
#  import matplotlib.pyplot as plt)
model.summary()

csv_logger = CSVLogger('training.log')
es_cb = EarlyStopping(monitor='val_loss', mode='auto', patience=30, verbose=1)
tb_cb = TensorBoard(log_dir='./logs', write_graph=True)
hist = model.fit(train,
                 train_label,
                 epochs=epochs,
                 batch_size=batch_size,
                 validation_split=1 / 7,
                 shuffle=True,
                 verbose=1,
                 callbacks=[es_cb, tb_cb, csv_logger])

score = model.evaluate(test, test_label, verbose=1)
print('test loss:', score[0])
print('test acc:', score[1])

# plot results
plt.subplot(3, 1, 1)
loss = hist.history['loss']
val_loss = hist.history['val_loss']

epochs = len(loss)
plt.plot(range(epochs), loss, marker='.', label='loss')
plt.plot(range(epochs), val_loss, marker='.', label='val_loss')
plt.legend(loc='best')
plt.grid()
plt.xlabel('epoch')
plt.ylabel('loss')
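The plt.subplot(3, 1, 1) call suggests the original figure had further panels; a
sketch of a matching accuracy panel (an assumption, using the pre-TF2 'acc'
history keys):

plt.subplot(3, 1, 2)
acc = hist.history['acc']
val_acc = hist.history['val_acc']
plt.plot(range(epochs), acc, marker='.', label='acc')
plt.plot(range(epochs), val_acc, marker='.', label='val_acc')
plt.legend(loc='best')
plt.grid()
plt.xlabel('epoch')
plt.ylabel('acc')
plt.show()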
Example #6
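# BATCH_SIZE, Tau, Ls, EPOCHS, PLT and the helpers generate_sine_data(),
# Normalize() and create_dataset() are defined elsewhere in the original script.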
def main():

    start = time.time()

    # generate multiple time-series sequences
    dataframe = generate_sine_data()

    dataset = dataframe.values.astype('float32')
    # put dataset of multiple time-series sequences
    dataset = Normalize(dataset)

    # create dataset
    length = len(dataset)
    train_size = int(length * 0.67)
    test_size = length - train_size
    train, test = dataset[:train_size], dataset[train_size:]

    trainX, trainY = create_dataset(train)
    testX, testY = create_dataset(test)

    # trim leading samples so each split divides evenly by BATCH_SIZE, as
    # required by the fixed batch_input_shape used below
    trainX = trainX[len(trainX) % BATCH_SIZE:]
    trainY = trainY[len(trainY) % BATCH_SIZE:]
    length_test = len(testX)
    testX = testX[len(testX) % BATCH_SIZE:]
    testY = testY[len(testY) % BATCH_SIZE:]

    trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 3))
    testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 3))

    # construct the DNN model (LSTM + fully_connected_layer)
    model = Sequential()
    model.add(LSTM(HIDDEN_SIZE, batch_input_shape=(BATCH_SIZE, Tau, 3)))
    model.add(Dense(3))
    model.summary()
    model.compile(loss='mean_squared_error',
                  optimizer='adam',
                  metrics=['accuracy'])

    # learn the DNN model on training dataset
    hist = model.fit(trainX,
                     trainY,
                     batch_size=BATCH_SIZE,
                     epochs=EPOCHS,
                     verbose=0,
                     shuffle=True)

    # plot the learning curve
    if PLT:
        epochs = range(1, EPOCHS + 1)  # one point per training epoch (was hard-coded 1..10)
        plt.figure()
        plt.plot(epochs, hist.history['loss'], label='loss/training')
        plt.plot(epochs, hist.history['acc'], label='acc/training')
        plt.xlabel('epoch')
        plt.ylabel('acc / loss')
        plt.legend()
        plt.show()
        plt.close()

    # evaluate the DNN model on test dataset
    score = model.evaluate(testX, testY, batch_size=BATCH_SIZE, verbose=0)
    print('loss: {0[0]}, acc: {0[1]} on test dataset'.format(score))

    # forecast Ls-steps-ahead value on the test dataset
    predicted = model.predict(testX, batch_size=BATCH_SIZE)

    # plot testY and predictedY
    if PLT:
        df_out = pd.DataFrame(predicted[:200])
        df_out.columns = [
            "predicted_sine", 'predicted_sine_rand', 'predicted_sine_int'
        ]
        df_out = pd.concat([
            df_out,
            pd.DataFrame(
                testY[:200],
                columns=["input_sine", "input_sine_rand", "input_sine_int"])
        ], axis=1)  # align predicted and input columns side by side
        plt.figure()
        df_out.plot()
        plt.show()
        plt.close()

    # plot the forecasting results on test dataset
    if PLT:
        plt.ion()
        i = 0
        while i < 20:
            K = 3  # the number of sensors
            fig = plt.figure(figsize=(8, K * 4))
            plt.subplots_adjust(hspace=0.2)
            for j in range(K):
                plt.subplot(K, 1, j + 1)
                plt.plot(range(i, i + Tau + Ls + 1),
                         test[length_test % BATCH_SIZE:][i:i + Tau + Ls + 1,
                                                         j],
                         color='silver',
                         label='original')
                plt.plot(range(i, i + Tau),
                         testX[i, :, j],
                         color='dodgerblue',
                         label='input')
                plt.scatter(i + Tau + Ls,
                            predicted[i, j],
                            s=15,
                            color='orange',
                            label='forecast')
                plt.legend()
            plt.draw()
            plt.pause(1.2)
            plt.clf()
            i += 1
        plt.close()

    end = time.time()
    print('elapsed_time: {}[s]'.format(end - start))
Example #7
# hist = model.fit(...) records the loss for each epoch
#######################################################
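The snippet assumes X, y, and model from earlier in the original; a minimal xor
setup consistent with how they are used below (an assumption, not the original
code):

import numpy as np
from keras.layers import Dense
from keras.models import Sequential

X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype='float32')
y = np.array([[0], [1], [1], [0]], dtype='float32')

model = Sequential()
model.add(Dense(8, input_dim=2, activation='tanh'))   # hidden layer
model.add(Dense(1, activation='sigmoid'))             # output in [0, 1]
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])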

hist1 = model.fit(X, y, batch_size=1, validation_split=0.25,
                  epochs=10)  # accuracy 0.75
hist2 = model.fit(X, y, batch_size=1, epochs=1000)  # accuracy 0.75

# Watch how the weights change as the network approaches the xor function
epochs = 5
for epoch in range(epochs):
    print("epoch:", epoch)
    model.fit(X, y, batch_size=1, epochs=1)
    print("Layer1 weights shape:")
    print(model.layers[0].weights)
    print("Layer1 kernel:")
    print(model.layers[0].get_weights()[0])  # each epoch moves the network closer to xor
    print("Layer1 bias:")
    print(model.layers[0].get_weights()[1])

print(model.predict(X))
print(model1.predict(X))  # model1 appears to be a second model defined in the truncated source
error = model.evaluate([X], [y])
print("error", error)
"""
[[ 0.0033028 ]
 [ 0.99581173]
 [ 0.99530098]
 [ 0.00564186]]
"""
Example #8
# Model
model = Sequential()

# default data format: (samples, rows, cols, channels)
model.add(
    Convolution2D(32,
                  kernel_size=(3, 3),
                  strides=(1, 1),
                  activation='relu',
                  input_shape=(28, 28, 1)))
model.add(
    Convolution2D(32, kernel_size=(3, 3), strides=(1, 1), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
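X_train/Y_train are assumed to exist already; for MNIST-shaped inputs matching
the (28, 28, 1) input_shape above, the preparation might look like this (a
sketch, not part of the original):

from keras.datasets import mnist
from keras.utils import to_categorical

(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
X_train = X_train.reshape(-1, 28, 28, 1).astype('float32') / 255.0
X_test = X_test.reshape(-1, 28, 28, 1).astype('float32') / 255.0
Y_train = to_categorical(Y_train, 10)  # 10 digit classes
Y_test = to_categorical(Y_test, 10)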

model.fit(X_train, Y_train, batch_size=32, epochs=10, verbose=1)

score = model.evaluate(X_test, Y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
Example #9
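# Assumed imports for this snippet (Keras 2.x):
# from keras import regularizers
# from keras.callbacks import EarlyStopping
# from keras.layers import Dense, Dropout
# from keras.models import Sequential
# from keras.optimizers import Adam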
class MLP_keras:
    def __init__(self, learning_rate, layers, functions, optimizer_name,
                 beta=0.0, dropout=0.0):
        # `dropout` is passed straight to keras.layers.Dropout, where it is the
        # fraction of units to DROP; the original default of 1.0 would zero out
        # every activation, so 0.0 (no dropout) is the safer default.
        
        self.n_input = layers[0]
        self.n_hidden = layers[1:-1]
        self.n_output = layers[-1]
        
        self.model = Sequential()
        
        if len(self.n_hidden) == 0:
            # single layer
            self.model.add(Dense(self.n_output, activation=functions[0],
                             kernel_regularizer=regularizers.l2(beta),
                             input_shape=(self.n_input,)))
            
        elif len(self.n_hidden) == 1:
            # hidden layer
            self.model.add(Dense(self.n_hidden[0], activation=functions[0],
                                 kernel_regularizer=regularizers.l2(beta),
                                 input_shape=(self.n_input,)))
            self.model.add(Dropout(dropout))
            # output layer
            self.model.add(Dense(self.n_output, activation=functions[1],
                                 kernel_regularizer=regularizers.l2(beta)))
            
        else:
            # the first hidden layer
            self.model.add(Dense(self.n_hidden[0], activation=functions[0],
                                 kernel_regularizer=regularizers.l2(beta),
                                 input_shape=(self.n_input,)))
            self.model.add(Dropout(dropout))
            # the second hidden layer
            self.model.add(Dense(self.n_hidden[1], activation=functions[1],
                                 kernel_regularizer=regularizers.l2(beta)))
            self.model.add(Dropout(dropout))
            # the output layer
            self.model.add(Dense(self.n_output, activation=functions[2],
                                 kernel_regularizer=regularizers.l2(beta)))
        
        self.model.summary()
        
        if optimizer_name == 'Adam':
            optimizer = Adam(learning_rate)
        else:
            # fall back to Keras's string lookup so other optimizer names still work
            optimizer = optimizer_name
        
        #self.model.compile(loss='mean_squared_error',
        #                   optimizer=optimizer,
        #                   metrics=['accuracy'])
        
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=optimizer,
                           metrics=['accuracy'])
    
    def train(self, epochs, trn, vld=None, batch_size=32, es=0):
        if vld is not None:
            validation_data = (vld.x, vld.y)
            if es > 0:
                callbacks = [EarlyStopping(monitor='val_loss', patience=es)]
            else:
                callbacks = None
        else:
            validation_data = None
            callbacks = None
        
        self.model.fit(trn.x, trn.y,
                       batch_size=batch_size,
                       epochs=epochs,
                       verbose=2,
                       callbacks=callbacks,
                       validation_data=validation_data)
        
        # note: this is accuracy on the *training* data, so name it accordingly
        loss, trn_acc = self.model.evaluate(trn.x, trn.y, verbose=0)
        if vld is not None:
            _, vld_acc = self.model.evaluate(vld.x, vld.y, verbose=0)
        else:
            vld_acc = 0
        return ['', loss, trn_acc, vld_acc, 0]
    
    def evaluate(self, x_test, y_test):
        score = self.model.evaluate(x_test, y_test, verbose=0)
        print('Test loss:', score[0])
        print('Test accuracy:', score[1])
    
    def score(self, tst):
        return self.model.evaluate(tst.x, tst.y, verbose=0)[1]
    
    def predict_proba(self, x):
        if x is None: return None
        return self.model.predict_proba(x, verbose=0)
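A minimal sketch of driving this class; the trn object with .x/.y attributes
mirrors how train() uses it, and the data here is random (an illustration, not
the original harness):

import numpy as np
from types import SimpleNamespace
from keras.utils import to_categorical

trn = SimpleNamespace(x=np.random.random((120, 4)),
                      y=to_categorical(np.random.randint(3, size=120), 3))
mlp = MLP_keras(learning_rate=0.001, layers=[4, 16, 3],
                functions=['relu', 'softmax'], optimizer_name='Adam',
                dropout=0.2)
print(mlp.train(epochs=5, trn=trn, batch_size=16))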
Example #10

# The model definition (and the value of seq_length) are truncated in the source
# snippet; only the closing line of its model.compile(...) call survived:
#               metrics=['accuracy'])
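Given the Conv1D documentation excerpt at the end of the snippet and the
(samples, seq_length, 100) inputs below, a plausible stand-in for the missing
model (purely an assumption):

from keras.layers import Conv1D, Dense, GlobalMaxPooling1D
from keras.models import Sequential

seq_length = 50  # hypothetical; the original value is not in the snippet
model = Sequential()
model.add(Conv1D(64, kernel_size=3, activation='relu',
                 input_shape=(seq_length, 100)))
model.add(GlobalMaxPooling1D())
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop',
              metrics=['accuracy'])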

import numpy as np
x_train = np.random.random((1000, seq_length, 100))
y_train = np.random.randint(2, size=(1000, 1))
x_test = np.random.random((100, seq_length, 100))
y_test = np.random.randint(2, size=(100, 1))

hist = model.fit(x_train,
                 y_train,
                 validation_split=0.2,
                 batch_size=16,
                 epochs=1)
hist.history  # dict of per-epoch metrics (echoed when run in a notebook)

score = model.evaluate(x_test, y_test, batch_size=16)  # evaluate 16 samples per batch
"""
(['class Conv1D(tf_convolutional_layers.Conv1D, Layer):\n',
    1D convolution layer (e.g. temporal convolution).\n',
  '\n',
  '  This layer creates a convolution kernel that is convolved\n',
  '  with the layer input over a single spatial (or temporal) dimension\n',
  '  to produce a tensor of outputs.\n',
  '  If `use_bias` is True, a bias vector is created and added to the '
  'outputs.\n',
  '  Finally, if `activation` is not `None`,\n',
  '  it is applied to the outputs as well.\n',
  '\n',
  '  When using this layer as the first layer in a model,\n',
  '  provide an `input_shape` argument\n',
  '  (tuple of integers or `None`, e.g.\n',