"""

# the old tensorflow.contrib.keras path was removed; use the tf.keras namespace
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation

model = Sequential([
    Dense(32, input_shape=(784, )),
    Activation('relu'),
    Dense(10),
    Activation('softmax'),
])

model.summary()
"""
You can also simply add layers via the `.add()` method:
"""

model = Sequential()
model.add(Dense(32, input_dim=784))
model.add(Activation('relu'))
model.add(Dense(10))
model.add(Activation('softmax'))

model.summary()
"""
## Specifying the input shape

The model needs to know what input shape to expect, so the first layer (and only
the first, because the following layers can infer their shapes automatically) must
receive information about its input shape.
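As a minimal sketch (assuming the imports above), the two ways of declaring the
first layer's input shape are equivalent:

model_a = Sequential()
model_a.add(Dense(32, input_shape=(784,)))  # tuple form, excludes the batch dimension

model_b = Sequential()
model_b.add(Dense(32, input_dim=784))  # scalar shorthand for flat 1-D inputs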
Example 2
import numpy as np

# XOR truth table inputs
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
# y = np.array([[0], [1], [1], [0]])  # hand-written labels, equivalent to the line below
y = X[:, 0] ^ X[:, 1]  # better: compute the targets with the XOR operator itself
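# quick sanity check (added sketch): the targets match the XOR truth table
print(y)  # expected output: [0 1 1 0]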

#######################################################
# Sequential can declare activations as standalone layers,
# separate from the preceding Dense layers, just as Model can
#######################################################
# check whether the 3 model definitions below are equivalent
model = Sequential()
model.add(Dense(4, input_dim=2,
                name='dense1'))  # 2 hidden units reach ~50% accuracy, 8 reach 100%
model.add(Activation('relu', name='dense1_act'))
model.add(Dense(1, name='dense2'))
model.add(Activation('sigmoid', name='dense2_act'))  # output shaped by sigmoid
model.summary()  # see each layer of a model

# use Model instead of Sequential (requires Input and Model)
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

input_tensor = Input(shape=(2, ), name='input')
hidden = Dense(4, activation='relu', name='dense1_relu')(input_tensor)
output = Dense(1, activation='sigmoid',
               name='dense2_sigm')(hidden)  # output shaped by sigmoid
model1 = Model(inputs=input_tensor, outputs=output)
model1.summary()  # see each layer of a model
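# sanity check (added sketch): both definitions should report identical parameter
# counts, since Activation layers add no parameters (2*4+4=12, 4*1+1=5, 17 total)
assert model.count_params() == model1.count_params() == 17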
"""
# use Model to split layer and activation
input_tensor = Input(shape=(2,))
hidden = Dense(2)(input_tensor)
relu_hid = K.relu(hidden)
dense_out = Dense(1)(relu_hid) # output shaped by sigmoid
sigmoid_out = K.sigmoid(dense_out)
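To check that the definitions actually learn XOR, a hedged training sketch on the
tiny dataset above:

# added sketch: train the first (Sequential) model on the 4 XOR samples
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(X, y, epochs=500, verbose=0)  # tiny dataset, so many epochs are cheap
# with only 4 hidden units convergence is not guaranteed; expect [0. 1. 1. 0.]
print(model.predict(X).round().ravel())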
Example 3
# imports assumed by this example
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras import regularizers
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping


class MLP_keras:
    def __init__(self, learning_rate, layers, functions, optimizer_name,
                 beta=0.0, dropout=0.0):
        # note: Keras Dropout takes the fraction of units to DROP (a rate),
        # not a keep probability, so dropout=1.0 would zero every unit
        
        self.n_input = layers[0]
        self.n_hidden = layers[1:-1]
        self.n_output = layers[-1]
        
        self.model = Sequential()
        
        if len(self.n_hidden) == 0:
            # single layer
            self.model.add(Dense(self.n_output, activation=functions[0],
                                 kernel_regularizer=regularizers.l2(beta),
                                 input_shape=(self.n_input,)))
            
        elif len(self.n_hidden) == 1:
            # hidden layer
            self.model.add(Dense(self.n_hidden[0], activation=functions[0],
                                 kernel_regularizer=regularizers.l2(beta),
                                 input_shape=(self.n_input,)))
            self.model.add(Dropout(dropout))
            # output layer
            self.model.add(Dense(self.n_output, activation=functions[1],
                                 kernel_regularizer=regularizers.l2(beta)))
            
        else:
            # two hidden layers (note: hidden sizes beyond the second are ignored)
            # the first hidden layer
            self.model.add(Dense(self.n_hidden[0], activation=functions[0],
                                 kernel_regularizer=regularizers.l2(beta),
                                 input_shape=(self.n_input,)))
            self.model.add(Dropout(dropout))
            # the second hidden layer
            self.model.add(Dense(self.n_hidden[1], activation=functions[1],
                                 kernel_regularizer=regularizers.l2(beta)))
            self.model.add(Dropout(dropout))
            # the output layer
            self.model.add(Dense(self.n_output, activation=functions[2],
                                 kernel_regularizer=regularizers.l2(beta)))
        
        self.model.summary()
        
        if optimizer_name == 'Adam':
            optimizer = Adam(learning_rate)
        else:
            raise ValueError('unsupported optimizer: ' + optimizer_name)
        
        #self.model.compile(loss='mean_squared_error',
        #                   optimizer=optimizer,
        #                   metrics=['accuracy'])
        
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=optimizer,
                           metrics=['accuracy'])
    
    def train(self, epochs, trn, vld=None, batch_size=32, es=0):
        if vld is not None:
            validation_data = (vld.x, vld.y)
            if es > 0:
                callbacks = [EarlyStopping(monitor='val_loss', patience=es)]
            else:
                callbacks = None
        else:
            validation_data = None
            callbacks = None
        
        self.model.fit(trn.x, trn.y,
                       batch_size=batch_size,
                       epochs=epochs,
                       verbose=2,
                       callbacks=callbacks,
                       validation_data=validation_data)
        
        loss, trn_acc = self.model.evaluate(trn.x, trn.y, verbose=0)  # accuracy on the training set
        if vld is not None:
            _, vld_acc = self.model.evaluate(vld.x, vld.y, verbose=0)
        else:
            vld_acc = 0
        return ['', loss, trn_acc, vld_acc, 0]
    
    def evaluate(self, x_test, y_test):
        score = self.model.evaluate(x_test, y_test, verbose=0)
        print('Test loss:', score[0])
        print('Test accuracy:', score[1])
    
    def score(self, tst):
        return self.model.evaluate(tst.x, tst.y, verbose=0)[1]
    
    def predict_proba(self, x):
        if x is None: return None
        # predict() already returns per-class probabilities for a softmax output
        return self.model.predict(x, verbose=0)
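A hypothetical usage sketch with synthetic data (the trn/vld containers with .x/.y
attributes, and all shapes and arguments, are assumptions inferred from the method
signatures above):

import numpy as np
from types import SimpleNamespace

# hypothetical usage; data containers and shapes are assumptions
trn = SimpleNamespace(x=np.random.rand(256, 784),
                      y=np.eye(10)[np.random.randint(0, 10, 256)])
vld = SimpleNamespace(x=np.random.rand(64, 784),
                      y=np.eye(10)[np.random.randint(0, 10, 64)])

mlp = MLP_keras(learning_rate=1e-3, layers=[784, 128, 10],
                functions=['relu', 'softmax'], optimizer_name='Adam',
                beta=1e-4, dropout=0.2)
print(mlp.train(epochs=5, trn=trn, vld=vld, es=2))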
Example 4
def main():

    start = time.time()

    # generate multiple time-series sequences
    dataframe = generate_sine_data()

    dataset = dataframe.values.astype('float32')
    # normalize the dataset of multiple time-series sequences
    dataset = Normalize(dataset)

    # create dataset
    length = len(dataset)
    train_size = int(length * 0.67)
    test_size = length - train_size
    train, test = dataset[:train_size], dataset[train_size:]

    trainX, trainY = create_dataset(train)
    testX, testY = create_dataset(test)

    trainX = trainX[len(trainX) % BATCH_SIZE:]
    trainY = trainY[len(trainY) % BATCH_SIZE:]
    length_test = len(testX)
    testX = testX[len(testX) % BATCH_SIZE:]
    testY = testY[len(testY) % BATCH_SIZE:]

    trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 3))
    testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 3))

    # construct the DNN model (LSTM + fully_connected_layer)
    model = Sequential()
    model.add(LSTM(HIDDEN_SIZE, batch_input_shape=(BATCH_SIZE, Tau, 3)))
    model.add(Dense(3))
    model.summary()
    model.compile(loss='mean_squared_error',
                  optimizer='adam',
                  metrics=['accuracy'])  # note: accuracy is not meaningful for regression

    # learn the DNN model on training dataset
    hist = model.fit(trainX,
                     trainY,
                     batch_size=BATCH_SIZE,
                     epochs=EPOCHS,
                     verbose=0,
                     shuffle=True)

    # plot the learning curve
    if PLT:
        epochs = range(1, EPOCHS + 1)  # must match the number of epochs trained
        plt.figure()
        plt.plot(epochs, hist.history['loss'], label='loss/training')
        plt.plot(epochs, hist.history['acc'], label='acc/training')
        plt.xlabel('epoch')
        plt.ylabel('acc / loss')
        plt.legend()
        plt.show()
        plt.close()

    # evaluate the DNN model on test dataset
    score = model.evaluate(testX, testY, batch_size=BATCH_SIZE, verbose=0)
    print('loss: {0[0]}, acc: {0[1]} on test dataset'.format(score))

    # forecast Ls-steps-ahead value on the test dataset
    predicted = model.predict(testX, batch_size=BATCH_SIZE)

    # plot testY and predictedY
    if PLT:
        df_out = pd.DataFrame(predicted[:200])
        df_out.columns = [
            "predicted_sine", 'predicted_sine_rand', 'predicted_sine_int'
        ]
        df_out = pd.concat([
            df_out,
            pd.DataFrame(
                testY[:200],
                columns=["input_sine", "input_sine_rand", "input_sine_int"])
        ], axis=1)  # axis=1 aligns predictions and targets side by side
        plt.figure()
        df_out.plot()
        plt.show()
        plt.close()

    # plot the forecasting results on test dataset
    if PLT:
        plt.ion()
        i = 0
        while i < 20:
            K = 3  # the number of sensors
            fig = plt.figure(figsize=(8, K * 4))
            plt.subplots_adjust(hspace=0.2)
            for j in range(K):
                plt.subplot(K, 1, j + 1)
                plt.plot(range(i, i + Tau + Ls + 1),
                         test[length_test % BATCH_SIZE:][i:i + Tau + Ls + 1, j],
                         color='silver',
                         label='original')
                plt.plot(range(i, i + Tau),
                         testX[i, :, j],
                         color='dodgerblue',
                         label='input')
                plt.scatter(i + Tau + Ls,
                            predicted[i, j],
                            s=15,
                            color='orange',
                            label='forecast')
                plt.legend()
            plt.draw()
            plt.pause(1.2)
            plt.clf()
            i += 1
        plt.close()

    end = time.time()
    print('elapsed_time: {}[s]'.format(end - start))
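The helpers generate_sine_data, Normalize, and create_dataset are not shown; a
hedged sketch of create_dataset, consistent with the shapes used above (windows of
Tau timesteps over 3 features, target Ls steps ahead), might look like this:

def create_dataset(data, tau=Tau, ls=Ls):
    # hypothetical sliding-window helper (an assumption, not the author's code):
    # X[i] holds tau consecutive rows, y[i] the row ls steps further ahead
    xs, ys = [], []
    for i in range(len(data) - tau - ls):
        xs.append(data[i:i + tau])
        ys.append(data[i + tau + ls])
    return np.array(xs), np.array(ys)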
# Dense connects every neuron in this layer to every neuron in the next layer
model.add(Dense(800, input_dim=784, init="normal", activation="relu"))
model.add(Dense(10, init="normal", activation="softmax"))

model.compile(loss="categorical_crossentropy",
              optimizer="SGD",
              metrics=["accuracy"])
#optimizer="SGD" stohastic gradient decend, method of learning
#loss="categorical_crossentropy" - error by category
#metrics=["accuracy"] - optimization metric is a accuracy
#metrics=["mae"] - percent of right answers of data set

#mse - mean squared error
#mae - mean absolute error
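For a regression model, the same compile call would use those error metrics
instead, e.g.:

# regression variant (sketch): MSE loss, with MAE reported as a metric
model.compile(loss="mse", optimizer="SGD", metrics=["mae"])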

print(model.summary())

# train the network
model.fit(X_train, y_train, batch_size=200, nb_epoch=100, verbose=1)
# batch_size: number of samples per stochastic gradient update
# verbose=1: print diagnostic info while the model is training
# validation_split=0.2 would hold out 20% of the data as a validation set

predictions = model.predict(X_train)

# convert per-class probabilities to single class labels
predictions = np_utils.categorical_probas_to_classes(predictions)
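categorical_probas_to_classes comes from the Keras 1 np_utils module; plain NumPy
gives the same result (assuming numpy is imported as np):

# equivalent with plain NumPy, if np_utils is unavailable:
# predictions = np.argmax(predictions, axis=1)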

score = model.evaluate(X_test, y_test, verbose=0)
print('Accuracy: ', score[1] * 100)