class Actor(BasePolicy):
    """
    Actor network (policy) for TD3.

    :param obs_dim: (int) Dimension of the observation
    :param action_dim: (int) Dimension of the action space
    :param net_arch: ([int]) Network architecture
    :param activation_fn: (str or tf.activation) Activation function
    """
    def __init__(self,
                 obs_dim,
                 action_dim,
                 net_arch,
                 activation_fn=tf.nn.relu):
        super(Actor, self).__init__(None, None)

        actor_net = create_mlp(obs_dim,
                               action_dim,
                               net_arch,
                               activation_fn,
                               squash_out=True)
        self.mu = Sequential(actor_net)
        self.mu.build()

    @tf.function
    def call(self, obs):
        return self.mu(obs)
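
A minimal usage sketch, assuming TensorFlow 2.x and that create_mlp and BasePolicy (a Keras-Model-style base class) come from the surrounding codebase; the dimensions and the dummy observation are illustrative only:

import numpy as np
import tensorflow as tf

# hypothetical sizes for a small continuous-control task
actor = Actor(obs_dim=8, action_dim=2, net_arch=[64, 64])
obs = tf.constant(np.zeros((1, 8), dtype=np.float32))
action = actor(obs)  # shape (1, 2), squashed to [-1, 1] because squash_out=True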
Example 2
class LM_Loss(object):
    def __init__(self):
        self.leung_malik = LeungMalik()
        self.filter_bank = self.leung_malik.f
        self.lm_model = Sequential(layers=[
            Conv2D(self.filter_bank.shape[-1],
                   (self.filter_bank.shape[0], self.filter_bank.shape[1]),
                   padding="same",
                   activation="linear",
                   use_bias=False,
                   kernel_initializer=self.kernel_initializer,
                   data_format="channels_last",
                   input_shape=(None, None, 1))
        ])
        for layer in self.lm_model.layers:
            layer.trainable = False
        self.lm_model.build()
        self.LossFunction = MeanSquaredError()
        # self.LossFunction = CosineSimilarity()

    def kernel_initializer(self, input_shape, dtype=None):
        return np.expand_dims(self.leung_malik.make_filters().astype(
            np.float32),
                              axis=2)

    def loss(self, y_true, y_pred):
        y_true = self.lm_model(y_true)
        y_pred = self.lm_model(y_pred)
        return self.LossFunction(y_true, y_pred)
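
A hedged sketch of using the filter-bank loss with a Keras model; it assumes a LeungMalik implementation is importable, and the tiny single-channel network exists only to show that any callable taking (y_true, y_pred) can be passed to compile:

from tensorflow.keras.layers import Conv2D
from tensorflow.keras.models import Sequential

lm_loss = LM_Loss()
denoiser = Sequential([
    Conv2D(8, (3, 3), padding="same", activation="relu", input_shape=(None, None, 1)),
    Conv2D(1, (3, 3), padding="same", activation="linear"),
])
denoiser.compile(optimizer="adam", loss=lm_loss.loss)  # filters both images, then compares them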
Example 3
def train_model(run_dir, hparams):

    hp.hparams(hparams)
    [X_train, y_train] = create_data(data_unscaled=data, start_train=start_train, end_train=end_train, n_windows=hparams[HP_WINDOW], n_outputs=hparams[HP_OUTPUT])
    [X_test, y_test] = create_data(data_unscaled=data, start_train=end_train, end_train=end_test, n_windows=hparams[HP_WINDOW], n_outputs=hparams[HP_OUTPUT])

    tf.compat.v1.keras.backend.clear_session()
    model = Sequential()
    model.add(TimeDistributed(Masking(mask_value=0., input_shape=(hparams[HP_WINDOW], n_inputs+1)), input_shape=(n_company, hparams[HP_WINDOW], n_inputs+1)))
    model.add(TimeDistributed(LSTM(hparams[HP_NUM_UNITS], stateful=False, activation='tanh', return_sequences=True, input_shape=(hparams[HP_WINDOW], n_inputs+1), kernel_initializer='TruncatedNormal' ,bias_initializer=initializers.Constant(value=0.1), dropout=hparams[HP_DROPOUT] ,recurrent_dropout=hparams[HP_DROPOUT])))
    model.add(TimeDistributed(LSTM(hparams[HP_NUM_UNITS], stateful=False, activation='tanh' ,return_sequences=False ,kernel_initializer='TruncatedNormal' ,bias_initializer=initializers.Constant(value=0.1) ,dropout=hparams[HP_DROPOUT], recurrent_dropout=hparams[HP_DROPOUT])))
    #model.add(TimeDistributed(LSTM(hparams[HP_NUM_UNITS], stateful=False, activation='tanh' ,return_sequences=True ,kernel_initializer='TruncatedNormal' ,bias_initializer=initializers.Constant(value=0.1) ,dropout=hparams[HP_DROPOUT], recurrent_dropout=hparams[HP_DROPOUT])))
    #model.add(TimeDistributed(LSTM(hparams[HP_NUM_UNITS], stateful=False, activation='tanh' ,return_sequences=False ,kernel_initializer='TruncatedNormal' ,bias_initializer=initializers.Constant(value=0.1) ,dropout=hparams[HP_DROPOUT], recurrent_dropout=hparams[HP_DROPOUT])))
    model.add(Dense(units=20, activation='softmax'))
    model.compile(optimizer=hparams[HP_OPTIMIZER], loss='categorical_crossentropy', metrics=['accuracy']) #get_weighted_loss(weights=weights)) # metrics=['mae'])
    model.build(input_shape=(None, n_company, hparams[HP_WINDOW], n_inputs+1))
    model.summary()
    #model.fit(X_train, to_categorical(y_train[:, :, hparams[HP_WINDOW]-1:hparams[HP_WINDOW], 0],20), epochs=100, batch_size=batch_size, validation_data=(X_test, to_categorical(y_test[:, :, hparams[HP_WINDOW]-1:hparams[HP_WINDOW], 0],20)))
    model.fit(X_train, to_categorical(y_train[:, :, hparams[HP_WINDOW]-1:hparams[HP_WINDOW],0],20), epochs=1000, batch_size=batch_size, validation_data=(X_test, to_categorical(y_test[:, :, hparams[HP_WINDOW]-1:hparams[HP_WINDOW],0],20)), callbacks=[
        TensorBoard(log_dir=run_dir, histogram_freq=50, write_graph=True, write_grads=True, update_freq='epoch'),
        hp.KerasCallback(writer=run_dir, hparams=hparams)])
    model.save('model_' + str(hparams[HP_NUM_UNITS]) + '_' + str(hparams[HP_DROPOUT]) + '_' + str(hparams[HP_OPTIMIZER]) + '_' + str(hparams[HP_WINDOW]) + '_' + str(hparams[HP_OUTPUT]) + '.h5')

    pd.DataFrame(np.argmax(model.predict(X_test),axis=2)).to_csv('pred.csv')
    pd.DataFrame(np.reshape(np.transpose(model.predict(X_test),axes=(1,0,2)), (X_test.shape[0]*X_test.shape[1],20))).to_csv('pred_weights.csv')

    return 0
Example 4
def LeCunLeNet5(input_shape, num_classes):
    """LeCunLeNet-5 network built with Keras
    As for LeCun's origin LeNet-5, **the activation function is after the pooling layer**, and the activation function used is sigmoid.
    Inputs:
        input_shape: input shape of the element of the batched data, e.g., (32, 32, 3), (28, 28, 1).
        num_classes: number of top classifiers, e.g., 2, 10.
        attention: attention type, one of["official", "senet"], default None.
    """
    model = Sequential()

    model.add(Input(shape=input_shape))
    model.add(Conv2D(filters=6, kernel_size=(
        5, 5), padding="valid", name="conv2d_1"))
    model.add(MaxPool2D(strides=2))
    model.add(Activation("sigmoid"))
    model.add(Conv2D(filters=16, kernel_size=(
        5, 5), padding="valid", name="conv2d_2"))
    model.add(MaxPool2D(strides=2))
    model.add(Activation("sigmoid"))
    model.add(Flatten())
    model.add(Dense(120, activation="sigmoid", name="dense_1"))
    model.add(Dense(84, activation="sigmoid", name="dense_2"))
    model.add(Dense(num_classes, activation='softmax', name="dense_3"))

    model.build()

    return model
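
An illustrative build-and-compile call on MNIST-shaped inputs; the optimizer and loss below are placeholders, not part of the original function:

model = LeCunLeNet5(input_shape=(28, 28, 1), num_classes=10)
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
model.summary()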
Example 5
class DiffusionTf():
    def __init__(self, dx, dt, L, time, eta = 0.01):
        # Setting up data
        self._Nx = int(L / dx) + 1
        self._Nt = int(time / dt) + 1

        self._x_np = np.linspace(0, L, self._Nx)
        self._t_np = np.linspace(0, time, self._Nt)

        X, T = np.meshgrid(self._x_np, self._t_np)

        x = X.ravel()
        t = T.ravel()

        self._zeros = tf.reshape(tf.convert_to_tensor(np.zeros(x.shape)), shape=(-1,1))
        self._x = tf.reshape(tf.convert_to_tensor(x), shape=(-1,1))
        self._t = tf.reshape(tf.convert_to_tensor(t), shape=(-1,1))

        # Setting up model
        self._model = Sequential()
        self._model.add(Dense(20, activation='sigmoid'))
        self._model.add(Dense(1, activation="linear"))
        self._model.build(tf.concat([self._x, self._t], 1).shape)

        self._optimizer = optimizers.Adam(learning_rate=eta)
    
    def _g_trial(self):
        return (1 - self._t) * tf.sin(np.pi * self._x) + self._x * (1 - self._x) * self._t * self._model(tf.concat([self._x, self._t], 1))
    
    def _loss(self):
        with tf.GradientTape() as tape_x2:
            tape_x2.watch([self._x])
            with tf.GradientTape() as tape_x, tf.GradientTape() as tape_t:
                tape_x.watch([self._x])
                tape_t.watch([self._t])
                g_trial = self._g_trial()

            dg_dx = tape_x.gradient(g_trial, self._x)
            dg_dt = tape_t.gradient(g_trial, self._t)

        dg_d2x = tape_x2.gradient(dg_dx, self._x)

        return tf.losses.mean_squared_error(self._zeros, dg_d2x - dg_dt)
        
    def train(self, iters):
        # Train the model by computing gradients of the loss w.r.t. the network weights
        for i in range(iters):
            with tf.GradientTape() as tape:
                current_loss = self._loss()

            grads = tape.gradient(current_loss, self._model.trainable_variables)
            self._optimizer.apply_gradients(zip(grads, self._model.trainable_variables))
            
        # Output of model
        return self._x_np, self._t_np, np.array(self._g_trial()).reshape((self._Nt, self._Nx)).T
        
    def output(self):
        # Output of model
        return self._x_np, self._t_np, np.array(self._g_trial()).reshape((self._Nt, self._Nx)).T
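
A minimal usage sketch; the grid resolution and iteration count are illustrative, and setting the Keras float policy to float64 is an assumption needed so the float64 grid tensors match the model's weights:

import tensorflow as tf

tf.keras.backend.set_floatx("float64")
solver = DiffusionTf(dx=0.1, dt=0.05, L=1.0, time=1.0, eta=0.01)
x, t, u = solver.train(iters=200)  # u has shape (Nx, Nt): the trial solution on the grid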
Example 6
def test_dict_to_model():
    model = Sequential()
    model.build((1, ))

    dict_model = serialization.model_to_dict(model)

    recovered = serialization.dict_to_model(dict_model)
    assert recovered.to_json() == model.to_json()
Example 7
class DeepCNN(Sequential):

    #this is not a very extensive list right now but let us only require that the user specify the bare
    #minimum. We'll make this model more flexible in the future
    def __init__(self, shape, loss_class, **kwargs):
        super(DeepCNN, self).__init__()
        #make sure RGB images have 3 channels at the end as input
        self.shape = shape

        self.masked_loss = loss_class.wrapper()
        self.optimizer = kwargs.pop('optimizer', 'adam')
        self.CNN_metrics = kwargs.pop('metrics', 'accuracy')

        #initialize the model from tensorflow. Vanilla sequential
        self.model = Sequential()

        # first convolution layer: 64 filters with a 20x20 kernel
        self.model.add(
            Conv2D(64, (20, 20),
                   padding='same',
                   input_shape=self.shape,
                   name='layer1'))
        self.model.add(Activation('relu'))

        # second layer (kernel weights come from the layer's default initializer)
        self.model.add(Conv2D(64, (15, 15), padding='same', name='layer2'))
        self.model.add(Activation('relu'))

        #3rd layer
        #try initializing the arrays within the add method
        self.model.add(Conv2D(64, (15, 15), padding='same', name='layer3'))
        self.model.add(Activation('relu'))

        #4th layer
        self.model.add(Conv2D(64, (10, 10), padding='same', name='layer4'))
        self.model.add(Activation('relu'))

        #5th layer
        self.model.add(Conv2D(64, (6, 6), padding='same', name='layer5'))
        self.model.add(Activation('relu'))

        #6th layer
        self.model.add(Conv2D(64, (4, 4), padding='same', name='layer6'))
        self.model.add(Activation('relu'))

        #7th layer
        self.model.add(Dense(256))
        self.model.add(Activation('relu'))

        # 8th (output) layer
        self.model.add(Dense(3))

        self.model.compile(loss='binary_crossentropy',
                           optimizer=self.optimizer,
                           metrics=['accuracy'])

        self.model.build()
Example 8
def build_model(input_shape):
    model = Sequential()
    cell = ElmanCell(10, 20)
    model.add(RNN(cell, input_shape=input_shape[1:]))
    model.add(Softmax())

    model.build()

    return model
Example 9
    def __init__(self):
        model = Sequential()
        # a very simple one-layer network
        model.add(Flatten(input_shape=(72, 96, 4)))
        model.add(Dense(19, activation='softmax'))
        model.build()
        model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
        model.summary()
        self._model = model
Example 10
def make_model():
    learning_rate = 0.001
    model = Sequential()
    model.add(Dense(128, activation='relu'))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(34, activation='softmax'))
    opt = tf.optimizers.RMSprop(learning_rate=learning_rate)

    model.build(input_shape=(1,405)) # 405 is the size of bow representation
    return model
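
A short usage note: make_model returns an uncompiled model (the RMSprop optimizer created inside is never attached), so a caller would compile it first; the loss below is an assumption:

model = make_model()
model.compile(optimizer=tf.optimizers.RMSprop(learning_rate=0.001),
              loss="categorical_crossentropy",
              metrics=["accuracy"])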
Example 11
    def _make_block(self, layers: Tuple[int]) -> tf.keras.Model:
        dense_layers = [_make_layer(size) for size in layers]

        input_layers = [Flatten()]
        output_layers = [Dense(self._output_size, activation=softmax)]

        layers = input_layers + dense_layers + output_layers
        model = Sequential(layers)

        model.build(input_shape=[None, self._image_height, self._image_width])

        return model
Example 12
def model2(n1, num_classes):
    model = Sequential()
    model.add(
        Conv1D(filters=num_kernels,
               kernel_size=(k1),
               input_shape=(n1, 1),
               activation='tanh',
               padding='valid'))
    model.add(MaxPooling1D(pool_size=(k2)))
    model.add(Flatten())
    model.add(Dropout(0.1))
    model.add(Dense(n4, activation='tanh'))
    model.add(Dense(num_classes, activation='softmax'))
    model.build()
    return model
Example 13
def T_learning_classifier(input_shape, num_classes):
    model = Sequential()
    model.add(Input(shape=input_shape))
    model.add(
        Conv2D(num_classes, (1, 1),
               strides=1,
               padding='same',
               activation=None,
               use_bias=False))
    model.add(BatchNormalization())
    model.add(GlobalAveragePooling2D())
    model.add(Activation('softmax'))
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.build(input_shape)
    return model
Example 14
    def _create_model(self):
        """Create model"""
        neurons = self.neurons
        activations = self.activations
        n_features = self.n_features_
        n_outputs = self.n_outputs_

        model = Sequential()

        for size, activation in zip(neurons, activations):
            model.add(Dense(units=size, activation=activation))
        model.add(Dense(n_outputs))
        model.build((None, n_features))

        model.compile(loss=self._nv_loss(self.cu_, self.co_),
                      optimizer=self.optimizer)

        return model
Example 15
def speech_segmentation_model(pre_trained_path, load_weights= True):
    """
    Load pretrained model
    """
    model_name = 'speech_seg'
    json_model_file = os.path.join(pre_trained_path, model_name + '.json')
    h5_model_file = os.path.join(pre_trained_path, model_name + '.h5')

    model = Sequential()
    model.add(Bidirectional(LSTM(128, return_sequences= True)))
    model.add(Bidirectional(LSTM(128, return_sequences= True)))
    model.add(TimeDistributed(Dense(32)))
    model.add(TimeDistributed(Dense(32)))
    model.add(TimeDistributed(Dense(1, activation= 'sigmoid')))

    model.build(input_shape= (None, 200, 35))
    # model.summary()

    if load_weights:
        model.load_weights(h5_model_file)
    return model
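
A hedged usage sketch: the pretrained directory is a placeholder, and the zero batch only illustrates the expected (batch, 200, 35) input:

import numpy as np

model = speech_segmentation_model("pretrained_models/", load_weights=True)
frames = np.zeros((1, 200, 35), dtype=np.float32)
speech_prob = model.predict(frames)  # per-frame sigmoid scores, shape (1, 200, 1)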
Example 16
def mlp(embeddings: np.ndarray) -> (tf.keras.Model, str):
    config = nns_config['mlp']

    model = Sequential()
    model.add(
        SumEmbeddings(embeddings=embeddings,
                      input_length=MAX_LENGTH,
                      trainable_embeddings=TRAINABLE_EMBEDDINGS))

    for u in config['dense_units']:
        model.add(Dense(u, activation='relu', kernel_regularizer=l2(0.0001)))
        model.add(Dropout(config['dense_dropout']))

    model.add(Dense(2, activation='softmax'))

    model.compile(optimizer='adam',
                  loss=BinaryCrossentropy(),
                  metrics=['accuracy'])
    model.build((None, MAX_LENGTH))
    print(model.summary())
    return model, config['model_name']
Example 17
    def initializeModel(self):
        base = VGG16(include_top=False, input_shape=(224, 224, 3))
        model = Sequential()
        model.add(base)
        model.add(Flatten())
        model.add(Dense(256, activation='relu'))
        model.add(Dense(11, activation='softmax'))
        # model.add(Dropout(0.2))
        # model.add(Dense(11, kernel_regularizer=regularizers.l2(0.005), activation='softmax'))

        # use frozen model
        for layer in base.layers:
            layer.trainable = False

        model.layers[0].trainable = False

        model.compile(loss='categorical_crossentropy', optimizer='adam',
                    metrics=['accuracy'])
        model.build(input_shape=(None, 224, 224, 3))
        # define new model
        model.summary()
        return model
Example 18
def build_model(input_shape, n_layers, filters, kernel_size, pool_size,
                activation, n_classes, dropout, learning_rate, loss):
    rows, cols = input_shape
    layers = []
    for i in range(n_layers - 1):
        layers.append(
            tf.keras.layers.Conv2D(filters=filters,
                                   kernel_size=kernel_size,
                                   activation=activation))
        layers.append(tf.keras.layers.MaxPooling2D(pool_size=pool_size))
    layers.append(
        tf.keras.layers.Conv2D(filters=1,
                               kernel_size=(1, 1),
                               activation=activation))
    layers.append(tf.keras.layers.Flatten())
    layers.append(tf.keras.layers.Dense(32, activation='relu'))
    if dropout:
        layers.append(tf.keras.layers.Dropout(dropout))
    layers.append(tf.keras.layers.Dense(n_classes, activation='softmax'))
    model = Sequential(layers)
    model.build((None, rows, cols, 1))
    opt = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    model.compile(optimizer=opt, loss=loss
                  #metrics = [auroc]
                  )
    session = tf.compat.v1.keras.backend.get_session()
    # Re-initialize the model's weights every time build_model is called
    for layer in model.layers:
        if hasattr(layer, 'cell'):
            init_container = layer.cell
        else:
            init_container = layer
        for key, initializer in init_container.__dict__.items():
            if "initializer" not in key:  #is this item an initializer?
                continue  #if no, skip it
            var = getattr(init_container, key.replace("_initializer", ""))
            var.assign(initializer(var.shape, var.dtype))
    return model
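
The weight re-initialization loop above can be factored into a standalone helper; this is a sketch under the assumption that each layer keeps its *_initializer attributes alongside the matching weight variables:

def reinitialize_weights(model):
    for layer in model.layers:
        # recurrent layers keep their initializers on the cell
        container = layer.cell if hasattr(layer, "cell") else layer
        for key, initializer in container.__dict__.items():
            if "initializer" not in key:
                continue
            var = getattr(container, key.replace("_initializer", ""), None)
            if var is not None:
                var.assign(initializer(var.shape, var.dtype))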
Example 19
def LeNet5(input_shape, num_classes):
    """LeNet-5 network built with Keras
    Inputs:
        input_shape: input shape of the element of the batched data, e.g., (32, 32, 3), (28, 28, 1).
        num_classes: number of top classifiers, e.g., 2, 10.
        attention: attention type, one of["official", "senet"], default None.
    """
    model = Sequential()

    model.add(Input(shape=input_shape))
    model.add(Conv2D(filters=6, kernel_size=(5, 5),
                     padding="valid", activation="relu", name="conv2d_1"))
    model.add(MaxPool2D(strides=2, name="max_pooling2d_1"))
    model.add(Conv2D(filters=16, kernel_size=(5, 5),
                     padding="valid", activation="relu", name="conv2d_2"))
    model.add(MaxPool2D(strides=2, name="max_pooling2d_2"))
    model.add(Flatten(name="flatten"))
    model.add(Dense(120, activation="relu", name="dense_1"))
    model.add(Dense(84, activation="relu", name="dense_2"))
    model.add(Dense(num_classes, activation='softmax', name="dense_3"))

    model.build()

    return model
Example 20
def train_model(run_dir, hparams):

    hp.hparams(hparams)
    [X_train, y_train] = create_data(data_unscaled=data, start_train=start_train, end_train=end_train, n_windows=hparams[HP_WINDOW], n_outputs=hparams[HP_OUTPUT])
    [X_test, y_test] = create_data(data_unscaled=data, start_train=end_train, end_train=end_test, n_windows=hparams[HP_WINDOW], n_outputs=hparams[HP_OUTPUT])

    tf.compat.v1.keras.backend.clear_session()
    model = Sequential()
    model.add(TimeDistributed(Masking(mask_value=0., input_shape=(hparams[HP_WINDOW], n_inputs+1)), input_shape=(n_company, hparams[HP_WINDOW], n_inputs+1)))
    model.add(TimeDistributed(LSTM(hparams[HP_NUM_UNITS], stateful=False, activation='tanh', return_sequences=True, input_shape=(hparams[HP_WINDOW], n_inputs+1), kernel_initializer='TruncatedNormal' ,bias_initializer=initializers.Constant(value=0.1), dropout=hparams[HP_DROPOUT] ,recurrent_dropout=hparams[HP_DROPOUT])))
    model.add(TimeDistributed(LSTM(hparams[HP_NUM_UNITS], stateful=False, activation='tanh' ,return_sequences=False ,kernel_initializer='TruncatedNormal' ,bias_initializer=initializers.Constant(value=0.1) ,dropout=hparams[HP_DROPOUT], recurrent_dropout=hparams[HP_DROPOUT])))
    #model.add(TimeDistributed(LSTM(hparams[HP_NUM_UNITS], stateful=False, activation='tanh' ,return_sequences=True ,kernel_initializer='TruncatedNormal' ,bias_initializer=initializers.Constant(value=0.1) ,dropout=hparams[HP_DROPOUT], recurrent_dropout=hparams[HP_DROPOUT])))
    #model.add(TimeDistributed(LSTM(hparams[HP_NUM_UNITS], stateful=False, activation='tanh' ,return_sequences=False ,kernel_initializer='TruncatedNormal' ,bias_initializer=initializers.Constant(value=0.1) ,dropout=hparams[HP_DROPOUT], recurrent_dropout=hparams[HP_DROPOUT])))
    model.add(Dense(units=1, activation='linear'))
    model.compile(optimizer=hparams[HP_OPTIMIZER], loss='mse') #get_weighted_loss(weights=weights)) # metrics=['mae'])
    model.build(input_shape=(None, n_company, hparams[HP_WINDOW], n_inputs+1))
    model.summary()
    #model.fit(X_train, y_train[:, :, hparams[HP_WINDOW]-1:hparams[HP_WINDOW], 0], epochs=1, batch_size=batch_size, validation_data=(X_test, y_test[:, :, hparams[HP_WINDOW]-1:hparams[HP_WINDOW], 0]))
    model.fit(X_train, y_train[:, :, hparams[HP_WINDOW]-1:hparams[HP_WINDOW],0], epochs=200, batch_size=batch_size, validation_data=(X_test, y_test[:, :, hparams[HP_WINDOW]-1:hparams[HP_WINDOW],0]), callbacks=[
        TensorBoard(log_dir=run_dir, histogram_freq=10, write_graph=True, write_grads=True, update_freq='epoch'),
        hp.KerasCallback(writer=run_dir, hparams=hparams)])
    model.save('model_' + str(hparams[HP_NUM_UNITS]) + '_' + str(hparams[HP_DROPOUT]) + '_' + str(hparams[HP_OPTIMIZER]) + '_' + str(hparams[HP_WINDOW]) + '_' + str(hparams[HP_OUTPUT]) + '.h5')

    return 0
Example 21
# method 3
class MLP(Model):
    def __init__(self):
        super().__init__()
        self.dense = Dense(200, activation='relu')
        self.out = Dense(10, activation='softmax')

    def call(self, x):
        x = self.dense(x)
        y = self.out(x)
        return y


model = MLP()
# note: use (None, 784) rather than (784,), which is what the layer-level approach in method 1 uses
model.build(input_shape=(None, 784))
model.summary()
'''
4. compile a model
'''
criterion = tf.losses.CategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam()
model.compile(optimizer=optimizer, loss=criterion, metrics=['accuracy'])
'''
5. train and evaluate a model
'''
# method1 use built-in functions
model.fit(x_train, y_train, epochs=2, batch_size=100)
loss, accuracy = model.evaluate(x_test, y_test)
print("loss is {}, accuracy is {}".format(loss, accuracy))
Example 22
# (assumes a Sequential model was created in the truncated lines above)
model.add(
    Conv3D(4,
           kernel_size=2,
           padding="valid",
           dilation_rate=3,
           activation='relu'))
model.add(
    Conv3D(4,
           kernel_size=3,
           padding="valid",
           dilation_rate=2,
           activation='relu'))
model.add(GlobalAveragePooling3D())
model.add(Flatten())
model.add(Dense(num_classes))

model.build(input_shape=(d, h, w, ch))  # For keras2onnx

model.compile(loss='mse', optimizer="adam")

model.summary()

# Training
model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs)

# Evaluation
mse = model.evaluate(x_test, y_test)
print("Evaluation result: Loss:", mse)

# In case of providing output metric file, store the test mse value
if args.output_metric != "":
    with open(args.output_metric, 'w') as ofile:
        ofile.write(str(mse))
Example 23
model.add(layer=keras.layers.BatchNormalization(
    axis=1, center=True, scale=True, name="BatchNorm"))
model.add(layer=keras.layers.Dense(32, activation="relu", name="layer2"))
model.add(layer=keras.layers.Dropout(rate=0.2))
model.add(layer=keras.layers.Dense(16, activation="relu", name="layer3"))
model.add(
    layer=keras.layers.Dense(1, activation="sigmoid", name="OutputLayer"))

model.compile(
    loss=tf.keras.losses.BinaryCrossentropy(
        from_logits=False),  # the output layer already applies a sigmoid
    optimizer=tf.keras.optimizers.Adam(
        learning_rate=0.01),  # The famous Adam optimizer
    metrics=["accuracy"])

model.build(input_shape=[7172, 10])

# Get a snapshot of the model built:
model.summary()  # DON'T SKIP ME!!!!

# Train the model:
history = model.fit(
    x=np.array(x_train),
    y=np.array(y_train),
    validation_split=0.2,  # 80/20 split
    verbose=1,
    epochs=50,  # Notice that r-square is pretty low for the first 5 epochs
    callbacks=[callback])  # Early stopping

tf_preds = model.predict(x=x_test)
Example 24
class EigenSolver():
    def __init__(self, A, x0, dt, T, eta=0.01):
        self._A = A
        N = x0.shape[0]
        x0 /= np.linalg.norm(x0)  # normalize
        self._x0 = tf.reshape(
            tf.convert_to_tensor(x0, dtype=tf.dtypes.float64),
            shape=(1, -1))  # row vector, since the NN outputs row vectors

        Nt = int(T / dt) + 1
        self._t_arr = np.linspace(0, T, Nt)
        self._t = tf.reshape(tf.convert_to_tensor(self._t_arr,
                                                  dtype=tf.dtypes.float64),
                             shape=(-1, 1))  # column vector

        self._zeros = tf.convert_to_tensor(np.zeros((N, Nt)))

        # Setting up model
        self._model = Sequential()
        self._model.add(Dense(100, activation='sigmoid'))
        self._model.add(Dense(50, activation='sigmoid'))
        self._model.add(Dense(25, activation='sigmoid'))
        self._model.add(Dense(N, activation="linear"))
        self._model.build(self._t.shape)

        self._optimizer = optimizers.Adam(eta)

    def _x_net(self):
        return tf.exp(-self._t) @ self._x0 + self._model(
            self._t) * (1 - tf.exp(-self._t))

    def _loss(self):
        with tf.GradientTape() as tape_t:
            tape_t.watch([self._t])
            x_net = self._x_net()
        # batch_jacobian takes the gradient of each element of x for each time step
        dx_dt = tape_t.batch_jacobian(x_net, self._t)[:, :, 0]

        # x_net is a collection of row vectors, but the matrix multiplications
        # below need column vectors, so transpose both
        dx_dt = tf.transpose(dx_dt)
        x_net = tf.transpose(x_net)

        Ax = self._A @ x_net
        xTx = tf.einsum("ij,ji->i", tf.transpose(x_net), x_net)
        xTAx = tf.einsum("ij,ji->i", tf.transpose(x_net), Ax)
        fx = xTx * Ax + (1 - xTAx) * x_net

        return tf.losses.mean_squared_error(self._zeros, dx_dt - fx + x_net)

    def train(self, iters):
        start_time = time.time()
        # Training the model by calculating gradient
        for i in range(iters):
            with tf.GradientTape() as tape:
                current_loss = self._loss()

            grads = tape.gradient(current_loss,
                                  self._model.trainable_variables)
            self._optimizer.apply_gradients(
                zip(grads, self._model.trainable_variables))
        total_time = time.time() - start_time
        print(f"Finished training with a loss of {np.mean(self._loss())} "
              f"after {total_time//60:.0f}m {total_time%60:.0f}s.")

    def output(self):
        # Output of model
        return self._t_arr, self._x_net()

    def evaluate(self, biggest=1):
        # Extracting eigenvector and value
        t, x_t = self.output()
        eig_vec = np.array(x_t[-1, :]) / np.linalg.norm(np.array(x_t[-1, :]))
        eig_val = np.mean(self._A @ eig_vec / eig_vec)
        eig_val_std = np.std(self._A @ eig_vec / eig_vec)

        # Analytical eigenvectors and values
        eigenvalues, v = np.linalg.eig(self._A)
        eig_index = np.argmax(eigenvalues)
        if biggest == 1:
            eig_val_anal = eigenvalues[eig_index]
        else:
            eig_val *= -1
            eig_val_anal = -eigenvalues[eig_index]
        eig_vec_anal = v[:, eig_index]
        # flip the sign so the two eigenvectors do not point in opposite directions
        eig_vec_anal *= np.sign(eig_vec[0] * eig_vec_anal[0])

        print(f"Eigenvalue = {eig_val:.5f} +- {eig_val_std:.5f}")
        print(
            f"Real eigen = {eig_val_anal:5f}, diff = {eig_val - eig_val_anal:.5f}"
        )
        print(f"Eigenvector =   {eig_vec}")
        print(f"Real eigenvec = {eig_vec_anal}")

        plt.figure()
        plt.xlabel("t")
        plt.ylabel("x(t)")
        for i in range(len(eig_vec)):
            plt.plot(t,
                     x_t[:, i] / np.linalg.norm(x_t, axis=1),
                     label=rf"$x_{i+1}$")
        plt.gca().set_prop_cycle(None)
        for i in range(len(eig_vec)):
            plt.plot([t[-1] + 0.2], eig_vec_anal[i], marker="D", markersize=4)
        plt.legend()
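
A hedged usage sketch for the eigenvalue solver: the random symmetric matrix and iteration count are illustrative, and the float64 policy is an assumption so the Keras weights match the solver's float64 tensors:

import numpy as np
import tensorflow as tf

tf.keras.backend.set_floatx("float64")
A = np.random.rand(6, 6)
A = (A + A.T) / 2  # the method expects a symmetric matrix
solver = EigenSolver(A, np.random.rand(6), dt=0.01, T=1.0, eta=0.01)
solver.train(iters=500)
solver.evaluate(biggest=1)  # compares against np.linalg.eig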
Example 25
model.add(Dense(1800, activation='relu', activity_regularizer=REGULARIZATION))
model.add(Dropout(DROPOUT))
model.add(BatchNormalization())

model.add(Dense(INPUT_LEN, activation='relu', activity_regularizer=REGULARIZATION))
model.add(Dropout(DROPOUT))
model.add(BatchNormalization())

model.add(Dense(OUTPUT_LEN, activation='linear'))

model.compile(loss="mse",
                optimizer=OPTIMIZER,
                metrics=['mae'])

model.build(trainX.shape)
model.load_weights('experiment-3/checkpoints/Bidirectional-LSTM-GRU-250.h5')

value_to_predict = 1500

sampleX = np.asarray([testX[value_to_predict]]) # Input
sampleXClosingValue = sampleX[0][1] # Input closing values
sampleY = testY[value_to_predict] # Output closing values truth
prediction = model.predict(sampleX)[0] # predicted output closing values

# Plot prediction
fig,ax=plt.subplots()

# Primary Axis Labels
ax.set_xlabel("Time")
ax.set_ylabel("Closing Price")
print("Train data shape:", x_train.shape)
print("Train labels shape:", y_train.shape)
print("Test data shape:", x_test.shape)
print("Test labels shape:", y_test.shape)

model = Sequential()
model.add(Input(shape=(28, 28, 1), name="linput"))
model.add(Conv2D(16, 5, padding="same", dilation_rate=2, activation="relu"))
model.add(MaxPooling2D(2, 2))
model.add(Conv2D(16, 3, padding="same", dilation_rate=3, activation="relu"))
model.add(Conv2D(16, 2, padding="valid", dilation_rate=4, activation="relu"))
model.add(GlobalAveragePooling2D())
model.add(Flatten())
model.add(Dense(10, activation='softmax'))

model.build(input_shape=(28, 28, 1))  # For keras2onnx

model.compile(loss='categorical_crossentropy',
              optimizer="adam",
              metrics=['accuracy'])

model.summary()

# Training
model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs)

# Evaluation
res = model.evaluate(x_test, y_test)
print("Evaluation result: Loss:", res[0], " Accuracy:", res[1])

# In case of providing output metric file, store the test accuracy value
Example 27
print("Test labels shape:", y_test.shape)

model = Sequential()
model.add(Input(shape=(784, 1), name="linput"))
model.add(Conv1D(16, 3, activation="relu"))
model.add(MaxPooling1D(2, 2))
model.add(Conv1D(16, 3, activation="relu"))
model.add(MaxPooling1D(2, 2))
model.add(Conv1D(16, 3, activation="relu"))
model.add(MaxPooling1D(2, 2))
model.add(Conv1D(16, 3, activation="relu"))
model.add(MaxPooling1D(2, 2))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))

model.build(input_shape=(784, 1))  # For keras2onnx

model.compile(loss='categorical_crossentropy',
              optimizer="adam",
              metrics=['accuracy'])

model.summary()

# Training
model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs)

# Evaluation
res = model.evaluate(x_test, y_test)
print("Evaluation result: Loss:", res[0], " Accuracy:", res[1])

# In case of providing output metric file, store the test accuracy value
Example 28
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.keras.models import load_model,Sequential
from tensorflow.keras.preprocessing.image import img_to_array,load_img
import numpy as np


model_url="https://tfhub.dev/google/imagenet/mobilenet_v1_050_160/classification/4"

model=Sequential([hub.KerasLayer(model_url)])

model.build(input_shape=[None,160,160,3])

print(model.summary())


def get_top_5(image):
    x=img_to_array(image)
    x=x[np.newaxis,...]
    pred = model.predict(x)

    return pred

image = load_img("images/Strawberry-Tuxedo-Cake-4-768x1024.jpg", target_size=(160, 160))

pred = get_top_5(image)

with open("data/imagenet_categories.txt") as f:

    categories = f.read().splitlines()
Example 29
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import SparseCategoricalCrossentropy
''' =========== Learning Setting =========== '''
exp_name = 'LSTM_MNIST'
CONTINUE_LEARNING = False

train_ratio = 0.8
train_batch_size, test_batch_size = 64, 64

epochs = 40
save_period = 2
learning_rate = 0.001

model = Sequential()
model.add(LSTM(128))
model.add(Dense(10, activation='softmax'))
model.build(input_shape=(None, 28, 28))
print(model.summary())
optimizer = Adam(learning_rate=learning_rate)
loss_object = SparseCategoricalCrossentropy()
''' =========== Training Setting =========== '''
path_dict = dir_setting(exp_name, CONTINUE_LEARNING)
model, losses_accs, start_epoch = continue_setting(CONTINUE_LEARNING,
                                                   path_dict, model)

train_ds, valid_ds, test_ds = load_preprocessing_mnist_for_rnn(
    train_ratio, train_batch_size, test_batch_size)
print(train_ds)
metric_objects = get_classification_metrics()

for epoch in range(start_epoch, epochs):
    train_step(train_ds, model, loss_object, optimizer, metric_objects)
Example 30
tensorboard_callback_bov = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                                          histogram_freq=1)

unit_dense_1 = 200
unit_dense_2 = 100
num_batches = 32
num_epochs = 100

model = Sequential([
    Input((401, )),
    Dense(unit_dense_1, kernel_regularizer='l2'),
    Dense(unit_dense_2, kernel_regularizer='l2'),
    Dense(1, activation='sigmoid')
])

model.build((None, 401))
model.summary()

model.compile(optimizer='nadam',
              loss=tf.losses.BinaryCrossentropy(),
              metrics=['accuracy'])

history = model.fit(X_train_concatenated,
                    Y_train_encoded,
                    batch_size=num_batches,
                    epochs=num_epochs,
                    validation_data=(X_validate_concatenated,
                                     Y_validate_encoded),
                    validation_steps=30,
                    callbacks=[
                        callback_early_stopping, callback_adaptive_lr,