Example #1
 def testKerasModel(self):
   model = Sequential(
       [Dense(10, input_shape=(100,)),
        Activation('relu', name='my_relu')])
   event = self.keras_model(name='my_name', data=model, step=1)
   first_val = event.summary.value[0]
   self.assertEqual(model.to_json(), first_val.tensor.string_val[0].decode())
Example #2
 def testKerasModel(self):
   model = Sequential(
       [Dense(10, input_shape=(100,)),
        Activation('relu', name='my_relu')])
   summary = self.keras_model(name='my_name', data=model, step=1)
   first_val = summary.value[0]
   self.assertEqual(model.to_json(), first_val.tensor.string_val[0])
Example #3
    def __init__(self):
        self.model = Sequential([
            Dense(40, input_shape=(4, ), activation="relu"),
            Dense(40, activation="relu"),
            Dense(40, activation="relu"),
            Dense(4, activation="tanh")
        ])
        # TODO: xavier initialization?
        self.model.compile(loss=keras.losses.mean_squared_error,
                           optimizer=keras.optimizers.Adam(lr=0.001))

        self.memory = deque(maxlen=10000)
Example #4
 def testKerasModel_usesDefaultStep(self):
   model = Sequential(
       [Dense(10, input_shape=(100,)),
        Activation('relu', name='my_relu')])
   try:
     summary_ops.set_step(42)
     event = self.keras_model(name='my_name', data=model)
     self.assertEqual(42, event.step)
   finally:
     # Reset to default state for other tests.
     summary_ops.set_step(None)
Example #5
  def testKerasModel_otherExceptions(self):
    model = Sequential()

    with test.mock.patch.object(model, 'to_json') as mock_to_json:
      with test.mock.patch.object(logging, 'warn') as mock_log:
        mock_to_json.side_effect = Exception('oops')
        self.assertFalse(
            summary_ops.keras_model(name='my_name', data=model, step=1))
        self.assertRegex(
            str(mock_log.call_args),
            'Model failed to serialize as JSON. Ignoring... oops')
Example #6
def fit(x_train, y_train, plot=True):
    #Data preprocessing
    if plot:
        display_data(x_train, y_train)
    x_train = x_train / 255.0
    x_train = x_train.reshape([-1, 784])
    y_train = one_hot(y_train)
    model = Sequential([
        Dense(512, input_dim=784),
        Activation('relu'),
        Dense(512),
        Activation('relu'),
        Dense(512),
        Activation('relu'),
        Dense(10),
        Activation('softmax')
    ])
    optimiser = keras.optimizers.Adam(learning_rate=0.01)
    model.compile(
        loss='categorical_crossentropy',  #Loss function for one hot inputs
        optimizer=optimiser,
        metrics=['accuracy'],
    )
    model.fit(x_train, y_train, validation_split=0.2, batch_size=64, epochs=20)
    return model
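
A minimal usage sketch for the fit() helper above, assuming the standard Keras MNIST loader and the module's own one_hot() helper; the test-set preprocessing shown here simply mirrors what fit() does internally and is an assumption, not part of the original snippet.

import keras

# Load MNIST and train with the helper defined above.
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
model = fit(x_train, y_train, plot=False)

# Evaluate on the held-out split using the same preprocessing fit() applies.
x_test = (x_test / 255.0).reshape([-1, 784])
loss, acc = model.evaluate(x_test, one_hot(y_test), verbose=0)
print('test accuracy:', acc)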
Example #7
    def __init__(self, input_space, action_space):

        #HyperParameters
        self.GAMMA = 0.95
        self.LEARNING_RATE = 0.002
        self.MEMORY_SIZE = 1000000
        self.BATCH_SIZE = 30
        self.EXPLORATION_MAX = 1.0
        self.EXPLORATION_MIN = 0.01
        self.EXPLORATION_DECAY = 0.997
        self.exploration_rate = self.EXPLORATION_MAX
        self.reward = 0

        self.actions = action_space
        #Experience Replay
        self.memory = deque(maxlen=self.MEMORY_SIZE)

        #Create the NN model
        self.model = Sequential()
        self.model.add(
            Dense(64, input_shape=(input_space, ), activation="relu"))
        self.model.add(Dense(64, activation="relu"))
        self.model.add(Dense(self.actions, activation="softmax"))
        self.model.compile(loss="mse", optimizer=Adam(lr=self.LEARNING_RATE))
Example #8
 def fitting(self, model: Sequential) -> Sequential:
     model.compile(
         optimizer="adam",
         loss="sparse_categorical_crossentropy",
         metrics=["accuracy"],
     )
     model.fit(self.data_source.x_train, self.data_source.y_train, epochs=10)
     # evaluate() prints the loss and accuracy
     model.evaluate(self.data_source.x_test, self.data_source.y_test, verbose=2)
     return model
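
The fitting() method above receives an uncompiled model; what follows is a hedged construction sketch, assuming a data_source holding MNIST-style arrays and a plain tf.keras classifier. The trainer object that owns fitting() is hypothetical here.

import tensorflow as tf

# A simple classifier to hand to fitting(); sparse_categorical_crossentropy in fitting()
# expects integer labels, and the softmax head yields class probabilities.
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10, activation='softmax'),
])
trained = trainer.fitting(model)  # `trainer` is a hypothetical owner of fitting()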
Example #9
 def fitting(self, model: Sequential) -> Sequential:
     # Predictions come out as logits / log-odds
     predictions = model(self.data_source.x_train[:1]).numpy()
     # Convert to probabilities
     probability = tf.nn.softmax(predictions).numpy()
     # Loss function; defined like this it returns a scalar loss for each example
     loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
         from_logits=True)
     # Uncomment to check the loss
     # loss = loss_fn(mnist.y_train[:1], predictions).numpy()
     model.compile(optimizer="adam", loss=loss_fn, metrics=["accuracy"])
     model.fit(self.data_source.x_train, self.data_source.y_train, epochs=5)
     model.evaluate(self.data_source.x_test,
                    self.data_source.y_test,
                    verbose=2)
     return model
Example #10
class Model:
    __batch_size = 100

    def __init__(self):
        self.model = Sequential([
            Dense(40, input_shape=(4, ), activation="relu"),
            Dense(40, activation="relu"),
            Dense(40, activation="relu"),
            Dense(4, activation="tanh")
        ])
        # TODO: xavier initialization?
        self.model.compile(loss=keras.losses.mean_squared_error,
                           optimizer=keras.optimizers.Adam(lr=0.001))

        self.memory = deque(maxlen=10000)

    def experience_replay(self):
        if len(self.memory) < self.__batch_size:
            return

        batch = random.sample(self.memory, self.__batch_size)
        states = np.vstack([state for state, _, _, _, _ in batch])
        next_states = np.vstack(
            [next_state for _, _, _, next_state, _ in batch])

        predicted_states = self.model.predict(states)
        predicted_next_states = self.model.predict(next_states)
        max_next_state_values = np.max(predicted_next_states, 1)

        for index, (_, action, reward, _, terminal) in enumerate(batch):
            q_update = reward

            if not terminal:
                discount_factor = 0.95
                q_update += discount_factor * max_next_state_values[index]

            learning_rate = 0.95
            predicted_states[index][action] = (
                (1 - learning_rate) * predicted_states[index][action] +
                learning_rate * q_update)
        self.model.fit(states, predicted_states, verbose=0)
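
A hedged smoke-test sketch for the replay logic above: the class exposes no act() or remember() helpers, so transitions are appended to `memory` directly, and random data is used purely to exercise experience_replay(). Shapes follow the network above (4-dimensional states, 4 discrete actions).

import numpy as np

agent = Model()
for _ in range(200):  # __batch_size is 100, so training starts once enough transitions exist
    state = np.random.uniform(-1.0, 1.0, size=(1, 4))
    next_state = np.random.uniform(-1.0, 1.0, size=(1, 4))
    transition = (state, int(np.random.randint(4)), float(np.random.rand()), next_state, False)
    agent.memory.append(transition)
agent.experience_replay()  # samples a batch, bootstraps Q-targets, and fits the network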
Example #11
    def seq_lstm(self, y_cols_idx=[-1]):
        from tensorflow.keras import Sequential
        from tensorflow.keras.layers import Dense, LSTM, Dropout

        len_outputs = len(self.y_cols)
        len_features = len(self.x_cols)

        regressor = Sequential()

        regressor.add(
            LSTM(
                units=60,
                activation="relu",
                return_sequences=True,
                input_shape=(self.x_train.shape[1], len_features),
            )
        )
        regressor.add(Dropout(0.2))

        regressor.add(LSTM(units=120, activation="relu", return_sequences=True))
        regressor.add(Dropout(0.2))

        regressor.add(LSTM(units=240, activation="relu", return_sequences=True))
        regressor.add(Dropout(0.2))

        regressor.add(LSTM(units=240, activation="relu", return_sequences=True))
        regressor.add(Dropout(0.2))

        regressor.add(LSTM(units=120, activation="tanh"))
        regressor.add(Dropout(0.2))

        regressor.add(Dense(units=len_outputs))

        self.model = regressor
        # self.save("my_model")
        # print(self.model.summary())
        # print("model done")
        return regressor
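
seq_lstm() returns the stacked LSTM regressor uncompiled; the source does not specify a loss or optimizer, so the training step below is only a sketch with commonly used choices, and the owning object is hypothetical.

# Hypothetical owner `ts` provides x_train/y_train shaped (samples, timesteps, features).
regressor = ts.seq_lstm()
regressor.compile(optimizer='adam', loss='mse')
regressor.fit(ts.x_train, ts.y_train, epochs=20, batch_size=32)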
Example #12
class NNQ:
    def __init__(self, input_space, action_space):

        #HyperParameters
        self.GAMMA = 0.95
        self.LEARNING_RATE = 0.002
        self.MEMORY_SIZE = 1000000
        self.BATCH_SIZE = 30
        self.EXPLORATION_MAX = 1.0
        self.EXPLORATION_MIN = 0.01
        self.EXPLORATION_DECAY = 0.997
        self.exploration_rate = self.EXPLORATION_MAX
        self.reward = 0

        self.actions = action_space
        #Experience Replay
        self.memory = deque(maxlen=self.MEMORY_SIZE)

        #Create the NN model
        self.model = Sequential()
        self.model.add(
            Dense(64, input_shape=(input_space, ), activation="relu"))
        self.model.add(Dense(64, activation="relu"))
        self.model.add(Dense(self.actions, activation="softmax"))
        self.model.compile(loss="mse", optimizer=Adam(lr=self.LEARNING_RATE))

    def act(self, state):
        #Exploration vs Exploitation
        if np.random.rand() < self.exploration_rate:
            return random.randrange(self.actions)

        q_values = self.model.predict(state)

        return np.argmax(q_values[0])

    def remember(self, state, action, reward, next_state, done):
        #in every action put in the memory
        self.memory.append((state, action, reward, next_state, done))

    def experience_replay(self):
        #When the memory is filled up take a batch and train the network
        if len(self.memory) < self.MEMORY_SIZE:
            return

        batch = random.sample(self.memory, self.BATCH_SIZE)
        for state, action, reward, next_state, terminal in batch:
            q_update = reward
            if not terminal:
                q_update = (
                    reward +
                    self.GAMMA * np.amax(self.model.predict(next_state)[0]))
            q_values = self.model.predict(state)
            q_values[0][action] = q_update
            self.model.fit(state, q_values, verbose=0)

        if self.exploration_rate > self.EXPLORATION_MIN:
            self.exploration_rate *= self.EXPLORATION_DECAY
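
A hedged end-to-end loop for the NNQ agent above, assuming a Gym-style environment (for example CartPole: 4 observations, 2 actions); states are reshaped to (1, n) because act() and experience_replay() call model.predict on single-sample batches. Note that experience_replay() as written only starts training once the deque reaches MEMORY_SIZE entries.

agent = NNQ(input_space=4, action_space=2)
for episode in range(100):
    state = env.reset().reshape(1, -1)  # `env` is an assumed Gym-style environment
    done = False
    while not done:
        action = agent.act(state)
        next_state, reward, done, _ = env.step(action)
        next_state = next_state.reshape(1, -1)
        agent.remember(state, action, reward, next_state, done)
        agent.experience_replay()
        state = next_state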
Example #13
from math import ceil, log10

from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import LSTM, RepeatVector, TimeDistributed, Dense

from src.helpers.helper_classifier import generate_data, invert

n_samples = 200000
n_numbers = 2
largest = 10000
alphabet = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-', '+', ' ']
n_chars = len(alphabet)
n_in_seq_length = n_numbers * ceil(log10(largest + 1)) + n_numbers - 1
n_out_seq_length = ceil(log10(n_numbers * (largest + 1)))
n_batch = 100
n_epoch = 500
model = Sequential([
    LSTM(100, input_shape=(n_in_seq_length, n_chars)),
    RepeatVector(n_out_seq_length),
    LSTM(50, return_sequences=True),
    TimeDistributed(Dense(n_chars, activation='softmax'))
])

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
for i in range(n_epoch):
    x, y = generate_data(n_samples, largest, alphabet)
    model.fit(x, y, epochs=1, batch_size=n_batch)

model.save('training/keras_classifier.h5')

# evaluate on some new patterns
x, y = generate_data(n_samples, largest, alphabet)
result = model.predict(x, batch_size=n_batch, verbose=0)
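
The predictions from the sequence-to-sequence model above have shape (n_samples, n_out_seq_length, n_chars), so each time step can be decoded back to a character by an argmax over the alphabet. The imported invert() helper presumably does something like this, but its signature is not shown, so the decoding below is a manual, assumed equivalent.

import numpy as np

decoded = [''.join(alphabet[i] for i in np.argmax(seq, axis=-1)) for seq in result]
print(decoded[:5])  # e.g. predicted sums as padded strings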
Example #14
def build_model():
    """
        Function that build the CNN + LSTM network
    """
    with tf.name_scope('CNN_LSTM'):
        model = Sequential()

        with tf.name_scope('Conv1'):
            model.add(
                TimeDistributed(Convolution2D(16, (5, 5),
                                              padding='same',
                                              strides=(2, 2)),
                                input_shape=(15, 16, 3200, 1),
                                name='Conv1'))

        model.add(BatchNormalization())
        model.add(Activation('relu'))

        with tf.name_scope('Conv2'):
            model.add(
                TimeDistributed(
                    Convolution2D(32, (5, 5),
                                  padding='same',
                                  strides=(1, 1),
                                  name='Conv2')))
            model.add(Activation('relu'))

        with tf.name_scope('Pooling'):
            model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))

        with tf.name_scope('Conv3'):
            model.add(
                TimeDistributed(
                    Convolution2D(32, (5, 5),
                                  padding='same',
                                  strides=(1, 1),
                                  name='Conv3')))
            model.add(Activation('relu'))

        with tf.name_scope('Conv4'):
            model.add(
                TimeDistributed(
                    Convolution2D(32, (5, 5),
                                  padding='same',
                                  strides=(1, 1),
                                  name='Conv4')))
            model.add(Activation('relu'))

        with tf.name_scope('Pooling'):
            model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))

        with tf.name_scope('FC1'):
            model.add(TimeDistributed(Flatten(), name='FC1'))
            model.add(Activation('relu'))

            model.add(TimeDistributed(Dropout(0.25)))

        with tf.name_scope('FC2'):
            model.add(TimeDistributed(Dense(256), name='FC2'))
            model.add(Activation('relu'))

            model.add(TimeDistributed(Dropout(0.25)))

        with tf.name_scope('LSTM'):
            model.add(tf.keras.layers.CuDNNLSTM(64, return_sequences=False))
            model.add(Dropout(0.5))

        with tf.name_scope('OutputLayer'):
            model.add(Dense(2, activation='softmax'))

    with tf.name_scope('Optimizer'):
        optimizer = optimizers.adam(lr=1e-4, decay=1e-5)

    with tf.name_scope('Loss'):
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['accuracy'])

    return model
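
A brief, hedged usage note: per the input_shape declared on the first TimeDistributed convolution, the network expects batches shaped (batch, 15, 16, 3200, 1), i.e. 15-frame sequences of 16x3200 single-channel maps, and the CuDNNLSTM layer requires a GPU build of TensorFlow 1.x. Run inside the original module, which provides the layer imports.

model = build_model()
model.summary()  # inspect the TimeDistributed CNN stack feeding the LSTM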
Example #15
def build_model(use_gpu: bool = False,
                num_units: int = 64,
                num_layers: int = 1,
                dropout_rate: float = 0.0,
                batch_size: int = 1000,
                window_size: int = 10,
                num_params: int = 0):
    """
    Builds the RNN-Model for character prediction.

    :param window_size: Sequence size
    :param batch_size: {int} Size of batch
    :param dropout_rate: {float} Regulating Dropout rate between layers
    :param num_layers: {int} Number of layers to build
    :param num_units: {int} Number of LSTM-Units to use in network
    :param use_gpu: {bool} Uses Tensorflow GPU support if True, otherwise trains on CPU
    :param num_params: {int} Number of control parameters
    :return: Keras model
    """

    # Load max 5000 entries from the dataset to build the Tokenizer / vocabulary
    loader = Loader(min(batch_size, 5000), 0)
    tokenizer = Tokenizer(filters='', split='°', lower=False)

    for dataframe in loader:

        chars = set()

        for name in dataframe['name']:
            chars.update(set(str(name)))

        tokenizer.fit_on_texts(list(chars))

    tokenizer.fit_on_texts(['pre', '<end>', 'pad'])

    # Build Keras Model
    model = Sequential()
    for r in range(0, max(num_layers - 1, 0)):
        model.add(layer=(CuDNNLSTM if use_gpu else LSTM
                         )(num_units,
                           input_shape=(window_size,
                                        len(tokenizer.index_word) + 1 +
                                        num_params),
                           return_sequences=True))
        model.add(Dropout(dropout_rate))

    model.add(
        layer=(CuDNNLSTM if use_gpu else LSTM)(num_units,
                                               input_shape=(
                                                   window_size,
                                                   len(tokenizer.index_word) +
                                                   1 + num_params)))
    model.add(Dense(len(tokenizer.index_word) + 1, activation='softmax'))

    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # Show summary
    print(model.summary())

    return model, tokenizer
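
A hedged call sketch: build_model() returns both the compiled model and the fitted tokenizer, and the Loader/dataset wiring above is project-specific, so only the call itself is shown.

model, tokenizer = build_model(use_gpu=False, num_units=64, num_layers=2,
                               dropout_rate=0.2, batch_size=1000, window_size=10)
vocab_size = len(tokenizer.index_word) + 1  # matches the model's output dimension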
Example #16
# Assumed tf.keras imports; the original fragment omits them and also references a
# `token` fitted earlier in the script that is not shown here.
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense

print('Word counts:', token.word_counts)
print('Document count:', token.document_count)
print('Number of documents containing each word:', token.word_docs)
print('Index assigned to each word:', token.word_index)

print()
# Read the texts and predict positive/negative sentiment

docs = ['너무 재밌네요', '최고에요','참 잘만든 영화예요','추천하고 싶은 영화네요','한번 더 보고싶네요',
        '글쎄요','별로네요','생각보다 지루합니다','연기가 좋지않아요','재미없어요']

import numpy as np 
classes = np.array([1,1,1,1,1,0,0,0,0,0])

token = Tokenizer()
token.fit_on_texts(docs)
print(token.word_index)

word_size = len(token.word_index) + 1  # assumed vocabulary size; not defined in the original fragment
model = Sequential()
model.add(Embedding(word_size,8,input_length=4))
#model.add(Flatten())
model.add(LSTM(32))
model.add(Dense(1,activation='sigmoid'))

print(model.summary())
model.compile(optimizer='adam',loss='binary_crossentropy')
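
The fragment stops after compile(); a hedged continuation would pad each tokenized review to the Embedding layer's input_length of 4 and fit against the 0/1 sentiment labels. pad_sequences is the usual helper for this, though the original does not show it.

from tensorflow.keras.preprocessing.sequence import pad_sequences

x = pad_sequences(token.texts_to_sequences(docs), maxlen=4)
model.fit(x, classes, epochs=20)
print(model.evaluate(x, classes))  # compiled without metrics, so this returns only the loss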




Example #17
    smooth_predictions = []
    for row in y_pred:
        smooth_predictions.append(np.argmax(row))
    return smooth_predictions


if __name__ == '__main__':
    x_train, x_test, y_train, y_test = get_data()

    #Sequential model used since each layer has exactly one input and one output tensor
    model = Sequential([
        Dense(512, input_dim=784),
        Activation('relu'),  #Activation function
        Dense(512),  #512 output neurons
        Activation(
            'relu'
        ),  #Replaces negative values with zero and keeps positive values
        Dense(512),
        Activation('relu'),
        Dense(10),
        Activation('softmax')  #Normalisation
    ])

    #Could change this optimiser
    #Adam optimisation is a stochastic gradient descent method
    #Can handle sparse gradients on noisy problems
    optimiser = keras.optimizers.Adam(learning_rate=0.01)

    #Training configuration
    model.compile(
        loss='categorical_crossentropy',  #Loss function for one hot inputs
        optimizer=optimiser,