def create_dqn():
    # Build a neural network with two hidden layers and a linear output layer
    nn = Sequential()
    nn.add(Dense(36, input_dim=OBSERVATION_SPACE_DIMS, activation='tanh'))
    nn.add(Dense(28, activation='relu'))
    nn.add(Dense(len(ACTION_SPACE), activation='linear'))
    nn.compile(loss='mse', optimizer=Adam(lr=ALPHA, decay=ALPHA_DECAY))
    return nn
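
A minimal, hypothetical setup for calling create_dqn; the constant names come from the snippet, but the values here are illustrative (CartPole-style dimensions), not from the source:

OBSERVATION_SPACE_DIMS = 4   # hypothetical: CartPole observation size
ACTION_SPACE = [0, 1]        # hypothetical: two discrete actions
ALPHA = 0.001                # learning rate
ALPHA_DECAY = 0.0            # learning-rate decay

dqn = create_dqn()
dqn.summary()
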
Example #2
    def build_model(self):
        model = Sequential()
        # Input layer and hidden layer 1; kernel_initializer draws the initial weights from the given distribution ('he_uniform' here).
        model.add(
            Dense(128,
                  input_dim=self.state_size,
                  activation='relu',
                  kernel_initializer='he_uniform'))
        # Hidden layer 2
        model.add(Dense(64, activation='relu'))
        # Output layer: linear activation so Q-values can be negative
        # (a 'relu' here would clamp them at zero)
        model.add(Dense(self.action_size, activation='linear'))

        # Compile the model
        # model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate, decay=0.00001))
        model.compile(loss='mse',
                      optimizer=Adam(lr=self.learning_rate, decay=0.0))
        return model
Example #3
def create_model(layers, activation, input_dim, output_dim):
    '''
    Builds and compiles a Keras Sequential model based on the given
    parameters.

    :param layers: [hiddenlayer1_nodes, hiddenlayer2_nodes, ...]
    :param activation: activation function for the hidden layers, e.g. 'relu'
    :param input_dim: number of input nodes
    :param output_dim: number of output nodes
    :return: compiled Keras model
    '''
    model = Sequential()
    for i, nodes in enumerate(layers):
        if i == 0:
            model.add(Dense(nodes, input_dim=input_dim, activation=activation))
        else:
            model.add(Dense(nodes, activation=activation))
    model.add(Dense(output_dim, activation='linear'))
    model.compile(loss='mse', optimizer='adam')

    return model
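
For instance, a hypothetical call building a small regression net with two hidden layers:

# Hidden layers of 64 and 32 nodes, 10 input features, 1 output.
model = create_model(layers=[64, 32], activation='relu',
                     input_dim=10, output_dim=1)
model.summary()
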
Example #4
def define_gan(g_model, d_model):
    # make weights in the discriminator not trainable
    d_model.trainable = False
    # connect them
    model = Sequential()
    # add generator
    model.add(g_model)
    # add the discriminator
    model.add(d_model)
    # compile model
    opt = Adam(lr=0.0002, beta_1=0.5)
    model.compile(loss='binary_crossentropy', optimizer=opt)
    return model
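
A sketch of how this composite model is typically wired up, assuming generator and discriminator builders like the ones shown below in Example #13 and Example #14:

latent_dim = 100
g_model = define_generator(latent_dim)
d_model = define_discriminator()
gan_model = define_gan(g_model, d_model)
# Training gan_model updates only the generator, because the
# discriminator was frozen (trainable=False) before compiling.
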
    def __init__(self, regularization=0.01):
        super(SiameseEncoder, self).__init__()
        self.inplanes = 64
        # Siamese branch.
        self.siamese = Sequential([
            Conv2D(64,
                   7,
                   strides=2,
                   padding='same',
                   use_bias=False,
                   kernel_regularizer=regularizers.l2(regularization)),
            self._make_resblock(2,
                                128,
                                strides=2,
                                regularization=regularization),
            self._make_resblock(2,
                                128,
                                strides=2,
                                regularization=regularization),
            self._make_resblock(2,
                                256,
                                strides=2,
                                regularization=regularization),
        ])
        # Merged main branch.
        self.mainstream = Sequential([
            self._make_resblock(2,
                                256,
                                strides=2,
                                regularization=regularization),
            self._make_resblock(2,
                                256,
                                strides=2,
                                regularization=regularization),
        ])
        self.bn = BatchNormalization()
        self.leaky_relu = LeakyReLU()
    def _make_resblock(self,
                       n_blocks,
                       n_filters,
                       strides=1,
                       regularization=0.01):
        """Build Residual blocks from BottleneckResidualUnit layers.

    Args:
      n_blocks: [BATCH, HEIGHT, WIDTH, 3] input source images.
      n_filters: (int) the number of filters.
      strides: (int)  the strides of the convolution.
      regularization: (float) l2 regularization coefficient.

    Returns:
     [BATCH, 1, 1, 1024] image embeddings.
    """
        layers = []
        if strides != 1 or self.inplanes != n_filters * BottleneckResidualUnit.expansion:
            downsample = Conv2D(n_filters * BottleneckResidualUnit.expansion,
                                1,
                                strides=strides,
                                padding='same',
                                use_bias=False)
        else:
            downsample = None
        self.inplanes = n_filters * BottleneckResidualUnit.expansion
        layers.append(
            BottleneckResidualUnit(n_filters,
                                   strides,
                                   downsample,
                                   regularization=regularization))
        for _ in range(1, n_blocks):
            layers.append(
                BottleneckResidualUnit(n_filters,
                                       1,
                                       regularization=regularization))
        return Sequential(layers)
Example #7
from config_9x9 import yy_price, y_train_price, y_test_price, y_val_price, ub_price, lb_price, diff_price, bound_sum_price
from config_9x9 import y_train_trafo_price, y_val_trafo_price, y_test_trafo_price
from config_9x9 import y_train_trafo1_price, y_val_trafo1_price, y_test_trafo1_price
from config_9x9 import y_train_trafo2_price, y_val_trafo2_price, y_test_trafo2_price
from config_9x9 import vega_train, vega_test, vega_val
# import custom scaling tools
from config_9x9 import ytransform, yinversetransform, myscale, myinverse

# custom error metrics
from add_func_9x9 import root_mean_squared_error, root_relative_mean_squared_error, mse_constraint, rmse_constraint
# other helper functions
from add_func_9x9 import constraint_violation, pricing_plotter, plotter_autoencoder

tf.compat.v1.keras.backend.set_floatx('float64')

NN1a = Sequential()
NN1a.add(InputLayer(input_shape=(
    Nparameters,
    1,
    1,
)))
NN1a.add(ZeroPadding2D(padding=(2, 2)))
NN1a.add(
    Conv2D(32, (3, 1),
           padding='valid',
           use_bias=True,
           strides=(1, 1),
           activation='elu'))
NN1a.add(ZeroPadding2D(padding=(3, 1)))
NN1a.add(
    Conv2D(32, (2, 2),
           padding='valid',
           use_bias=True,
           strides=(1, 1),
           activation='elu'))  # closing arguments assumed to mirror the first Conv2D; the original snippet is truncated here
Example #8
# dense_layers = [0, 1, 2]
# layer_sizes = [32, 64, 128]
# conv_layers = [1, 2, 3]

dense_layers = [0, 1, 2]
layer_sizes = [4, 8, 16]
conv_layers = [1, 2]

for dense_layer in dense_layers:
    for layer_size in layer_sizes:
        for conv_layer in conv_layers:
            NAME = f'Pneumonia-{IMG_SIZE}px-{NUM_SAMPLES}samples-{conv_layer}conv-{layer_size}nodes-{dense_layer}dense-{int(time.time())}'
            tensorboard = TensorBoard(log_dir=f'logs/{NAME}')
            print(NAME)

            model = Sequential()
            # arguments: number of filters, kernel size, input shape
            model.add(Conv2D(layer_size, (3, 3),
                             input_shape=x_train.shape[1:]))
            model.add(Activation("relu"))
            model.add(MaxPooling2D(pool_size=(2, 2)))
            print('Layer 0 generated')

            for i in range(conv_layer - 1):
                print(f'Layer {i + 1} generated.')
                model.add(Conv2D(layer_size, (3, 3)))
                model.add(Activation("relu"))
                model.add(MaxPooling2D(pool_size=(2, 2)))

            model.add(Flatten())
            for _ in range(dense_layer):
                model.add(Dense(layer_size))
                model.add(Activation("relu"))

            # The snippet is truncated here in the source; a binary
            # pneumonia/normal output and compile step are assumed.
            model.add(Dense(1))
            model.add(Activation("sigmoid"))
            model.compile(loss="binary_crossentropy",
                          optimizer="adam",
                          metrics=["accuracy"])
Example #9
def build_lstm_model(input_data, output_size, neurons=20, activ_func='linear',
                     dropout=0.25, loss='mae', optimizer='adam'):
    model = Sequential()
    model.add(CuDNNLSTM(neurons, input_shape=(input_data.shape[1], input_data.shape[2]), return_sequences=True))
    model.add(Dropout(dropout))
    model.add(CuDNNLSTM(neurons, input_shape=(input_data.shape[1], input_data.shape[2])))
    model.add(Dropout(dropout))
    model.add(Dense(units=output_size))
    model.add(Activation(activ_func))

    model.compile(loss=loss, optimizer=optimizer)
    return model
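
Note that CuDNNLSTM only runs on a GPU; tf.keras.layers.LSTM is the portable equivalent. A hypothetical call with dummy shapes (100 sequences of 30 timesteps with 5 features):

import numpy as np

input_data = np.random.rand(100, 30, 5)   # (samples, timesteps, features)
targets = np.random.rand(100, 1)

model = build_lstm_model(input_data, output_size=1)
model.fit(input_data, targets, epochs=2, batch_size=32)
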
Example #10
    def Train(self):
        # self.loadDataFeature()
        self.loadDataTxt()
        self.train_and_test_split(0.75)
        # model
        model = Sequential()

        # model.add(Dense(392, activation='relu'))
        # model.add(Dense(128, activation='relu'))
        # model.add(Dense(36, activation='softmax'))

        # CNN model

        model.add(
            Conv2D(64, (3, 3), activation='relu', input_shape=(28, 28, 1)))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(MaxPooling2D((2, 2)))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(MaxPooling2D((2, 2)))
        model.add(Flatten())
        model.add(Dense(128, activation='relu'))
        model.add(Dense(128, activation='relu'))
        model.add(Dense(36, activation='softmax'))

        # model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
        model.fit(
            self.train_data['data'],
            self.train_data['class_name'],
            batch_size=25,
            epochs=100,
            verbose=1,
            validation_data=(self.test_data['data'],
                             self.test_data['class_name']),
        )
        self.model = model
        model.save('digit_classification_model1.h5')
        # Y_pred = model.predict(self.test_data['data'])
        # self.metric(self.test_data['class_name'], Y_pred, data_type='binary')
        self.metric()
Example #11
def create_lstm():
    # create the model
    embedding_vector_length = 32
    model = Sequential(name="lstm")
    # model.name = 'lstm'
    model.add(
        Embedding(top_words,
                  embedding_vector_length,
                  input_length=max_review_length))
    model.add(
        Conv1D(filters=32, kernel_size=3, padding="same", activation="relu"))
    model.add(MaxPooling1D(pool_size=2))
    model.add(LSTM(10, name="lstm1", return_sequences=True))
    model.add(LSTM(32, name="lstm2", return_sequences=True))
    model.add(LSTM(64, name="lstm3", return_sequences=True))
    model.add(LSTM(128, name="lstm4", return_sequences=True))
    model.add(LSTM(48, name="lstm5"))
    model.add(Dense(1, activation="sigmoid"))
    model.compile(loss="binary_crossentropy",
                  optimizer="adam",
                  metrics=["accuracy"])

    return model
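
The function relies on two module-level globals; a hypothetical setup with typical IMDB-style values:

top_words = 5000          # hypothetical vocabulary size for the Embedding
max_review_length = 500   # hypothetical padded sequence length

model = create_lstm()
model.summary()
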
Example #12
idx = tokenizer.word_index
inverse_map = dict(zip(idx.values(), idx.keys()))


def tokens_to_string(tokens):
    # Map from tokens back to words.
    words = [inverse_map[token] for token in tokens if token != 0]

    # Concatenate all words.
    text = " ".join(words)

    return text


#Create the RNN
model = Sequential()
embedding_size = 8
model.add(
    Embedding(input_dim=num_words,
              output_dim=embedding_size,
              input_length=max_tokens,
              name='layer_embedding'))
model.add(GRU(units=16, return_sequences=True))
model.add(GRU(units=8, return_sequences=True))
model.add(GRU(units=4))
model.add(Dense(1, activation='sigmoid'))
optimizer = Adam(lr=1e-3)

model.compile(loss='binary_crossentropy',
              optimizer=optimizer,
              metrics=['accuracy'])
Example #13
def define_discriminator(in_shape=(32, 32, 3)):
    model = Sequential()
    # normal
    model.add(Conv2D(64, (3, 3), padding='same', input_shape=in_shape))
    model.add(LeakyReLU(alpha=0.2))
    # downsample
    model.add(Conv2D(128, (3, 3), strides=(2, 2), padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    # downsample
    model.add(Conv2D(128, (3, 3), strides=(2, 2), padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    # downsample
    model.add(Conv2D(256, (3, 3), strides=(2, 2), padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    # classifier
    model.add(Flatten())
    model.add(Dropout(0.4))
    model.add(Dense(1, activation='sigmoid'))
    # compile model
    opt = Adam(lr=0.0002, beta_1=0.5)
    model.compile(loss='binary_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    return model
Example #14
def define_generator(latent_dim):
    model = Sequential()
    # foundation for 4x4 image
    n_nodes = 256 * 4 * 4
    model.add(Dense(n_nodes, input_dim=latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Reshape((4, 4, 256)))
    # upsample to 8x8
    model.add(Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    # upsample to 16x16
    model.add(Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    # upsample to 32x32
    model.add(Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    # output layer
    model.add(Conv2D(3, (3, 3), activation='tanh', padding='same'))
    return model
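
A sketch of sampling from the generator; the shapes follow from the layers above, and the batch size here is arbitrary:

import numpy as np

latent_dim = 100
generator = define_generator(latent_dim)

# 16 random latent vectors -> 16 fake 32x32 RGB images in [-1, 1] (tanh output).
noise = np.random.randn(16, latent_dim)
fake_images = generator.predict(noise)   # shape (16, 32, 32, 3)
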
Example #15
# Feature scaling
from sklearn.preprocessing import StandardScaler
sc_x = StandardScaler()
X_train = sc_x.fit_transform(X_train)
X_test = sc_x.transform(X_test)

# Importing the Keras libraries and packages
import tensorflow.compat.v1 as tf
import keras
from tensorflow.compat.v1.keras.models import Sequential  # To initialise the neural networks
from tensorflow.compat.v1.keras.layers import Dense  # to create layers in neural networks

# create your classifier here
# Initialising the ANN
classifier = Sequential()
"""
AttributeError: module 'tensorflow' has no attribute 'get_default_graph' 
= due to using wrong version of tensprflow

from tensorflow.compat.v1.keras.models import Sequential
use this
classifier = tf.keras.models.Sequential()
"""

print(X_train.shape)  # confirm the number of input features (9)
# Adding the input layer and the first hidden layer
classifier.add(Dense(units=6, activation='relu', input_dim=9))

# Adding second hidden layer
classifier.add(Dense(units=6, activation='relu'))
Example #16
    def __init__(self, n_out, regularization=0.01):
        """Initialize the DirectionNet.

    Args:
      n_out: (int) the number of output distributions.
      regularization: L2 regularization factor for layer weights.
    """
        super(DirectionNet, self).__init__()
        self.encoder = SiameseEncoder()
        self.inplanes = self.encoder.inplanes
        self.decoder_block1 = Sequential([
            Conv2D(256,
                   3,
                   use_bias=False,
                   kernel_regularizer=regularizers.l2(regularization)),
            self._make_resblock(2, 128, regularization=regularization),
            BatchNormalization(),
            LeakyReLU()
        ])
        self.decoder_block2 = Sequential([
            Conv2D(128,
                   3,
                   use_bias=False,
                   kernel_regularizer=regularizers.l2(regularization)),
            self._make_resblock(2, 64, regularization=regularization),
            BatchNormalization(),
            LeakyReLU()
        ])
        self.decoder_block3 = Sequential([
            Conv2D(64,
                   3,
                   use_bias=False,
                   kernel_regularizer=regularizers.l2(regularization)),
            self._make_resblock(2, 32, regularization=regularization),
            BatchNormalization(),
            LeakyReLU()
        ])
        self.decoder_block4 = Sequential([
            Conv2D(32,
                   3,
                   use_bias=False,
                   kernel_regularizer=regularizers.l2(regularization)),
            self._make_resblock(2, 16, regularization=regularization),
            BatchNormalization(),
            LeakyReLU()
        ])
        self.decoder_block5 = Sequential([
            Conv2D(16,
                   3,
                   use_bias=False,
                   kernel_regularizer=regularizers.l2(regularization)),
            self._make_resblock(2, 8, regularization=regularization),
            BatchNormalization(),
            LeakyReLU()
        ])
        self.decoder_block6 = Sequential([
            Conv2D(8,
                   3,
                   use_bias=False,
                   kernel_regularizer=regularizers.l2(regularization)),
            self._make_resblock(2, 4, regularization=regularization),
            BatchNormalization(),
            LeakyReLU()
        ])
        self.down_channel = Conv2D(
            n_out, 1, kernel_regularizer=regularizers.l2(regularization))
Example #17
def fizzbuzz(i):
    # One-hot label: [plain number, fizz, buzz, fizzbuzz]
    if i % 15 == 0: return np.array([0, 0, 0, 1], dtype=np.float32)
    elif i % 5 == 0: return np.array([0, 0, 1, 0], dtype=np.float32)
    elif i % 3 == 0: return np.array([0, 1, 0, 0], dtype=np.float32)
    else: return np.array([1, 0, 0, 0], dtype=np.float32)


def bin(i, num_digits):
    # Shadows the built-in bin(); returns i as a little-endian bit vector.
    return np.array([i >> d & 1 for d in range(num_digits)], dtype=np.float32)


NUM_DIGITS = 7
trX = np.array([bin(i, NUM_DIGITS) for i in range(1, 101)])
trY = np.array([fizzbuzz(i) for i in range(1, 101)])
model = Sequential()
model.add(Dense(64, input_dim=7))
model.add(Activation('tanh'))
model.add(Dense(4))  # input size is inferred from the previous layer
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(trX, trY, epochs=3600, batch_size=64)
model.save('fizzbuzz_model.h5')


def representative_dataset_gen():
    for i in range(100):
        yield [trX[i:i + 1]]
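
representative_dataset_gen has the signature TensorFlow Lite expects for post-training quantization; a sketch of that use, assuming `import tensorflow as tf` (this wiring is an assumption, not shown in the snippet):

converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset_gen
tflite_model = converter.convert()

with open('fizzbuzz_model.tflite', 'wb') as f:
    f.write(tflite_model)
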
Example #18
def get_seq_model():
  """Define three channel input shape depending on image data format."""
  if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
  else:
    input_shape = (img_width, img_height, 3)

  # Initialize CNN by creating a sequential model.
  model = Sequential()
  model.add(Conv2D(32, (3, 3), input_shape=input_shape))
  model.add(Activation('relu'))
  model.add(MaxPooling2D(pool_size=(2, 2)))

  model.add(Conv2D(32, (3, 3)))
  model.add(Activation('relu'))
  model.add(MaxPooling2D(pool_size=(2, 2)))

  model.add(Conv2D(64, (3, 3)))
  model.add(Activation('relu'))
  model.add(MaxPooling2D(pool_size=(2, 2)))

  model.add(Flatten())
  model.add(Dense(64))
  model.add(Activation('relu'))
  model.add(Dropout(0.5))
  model.add(Dense(2))
  model.add(Activation('sigmoid'))

  model.compile(
      loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

  return model
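
img_width and img_height are assumed to be module-level globals; a hypothetical usage:

img_width, img_height = 150, 150   # hypothetical input resolution

model = get_seq_model()
model.summary()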