Example #1
    def __init__(self, cfg=Config()):
        super(CVAE, self).__init__()
        self.cfg = cfg
        self.width = cfg.img_shape[0]
        self.height = cfg.img_shape[1]

        self.inference_net = tf.keras.Sequential([
            layers.InputLayer(input_shape=cfg.img_shape),
            layers.Conv2D(cfg.filters, 3, 2, 'same', activation='relu'),
            layers.Conv2D(cfg.filters*2, 3, 2, 'same', activation='relu'),
            layers.Flatten(),
            layers.Dense(cfg.z_dim+cfg.z_dim)
        ])
        self.generative_net = tf.keras.Sequential([
            layers.InputLayer(input_shape=(cfg.z_dim,)),
            layers.Dense((self.width//4)*(self.height//4)*cfg.filters*2, activation='relu'),
            layers.Reshape([self.width//4, self.height//4, cfg.filters*2]),
            layers.Conv2DTranspose(cfg.filters*2, 3, 2, 'same', activation='relu'),
            layers.Conv2DTranspose(cfg.filters, 3, 2, 'same', activation='relu'),
            layers.Conv2DTranspose(1, 3, 1, 'same')
        ])
        self.label_net = tf.keras.Sequential([
            layers.InputLayer(input_shape=(10,)),
            layers.Dense(cfg.z_dim)
        ])
Example #2
    def __init__(self, latent_dim):
        super(CVAE, self).__init__()
        self.latent_dim = latent_dim
        self.encoder = tf.keras.Sequential(
            [
                layers.InputLayer(input_shape=(28, 28, 1)),
                layers.Conv2D(filters=32, kernel_size=3, strides=(2, 2), activation='relu'),
                layers.Conv2D(filters=64, kernel_size=3, strides=(2, 2), activation='relu'),
                layers.Flatten(),
                # No activation
                layers.Dense(latent_dim + latent_dim),
            ]
        )

        self.decoder = tf.keras.Sequential(
            [
                layers.InputLayer(input_shape=(latent_dim,)),
                layers.Dense(units=7*7*32, activation=tf.nn.relu),
                layers.Reshape(target_shape=(7, 7, 32)),
                layers.Conv2DTranspose(filters=64, kernel_size=3, strides=2, padding='same', activation='relu'),
                layers.Conv2DTranspose(filters=32, kernel_size=3, strides=2, padding='same', activation='relu'),
                # No activation
                layers.Conv2DTranspose(filters=1, kernel_size=3, strides=1, padding='same'),
            ]
        )
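
Both encoders above end in a Dense(latent_dim + latent_dim) layer with no activation: the output packs the mean and log-variance of the latent Gaussian into one tensor. A minimal sketch of how downstream code typically splits it and samples with the reparameterization trick (function names are illustrative, not part of the snippets):

def encode(model, x):
    # Split the encoder output into mean and log-variance halves.
    mean, logvar = tf.split(model.encoder(x), num_or_size_splits=2, axis=1)
    return mean, logvar

def reparameterize(mean, logvar):
    # z = mean + sigma * eps with eps ~ N(0, I); sampling stays
    # differentiable with respect to mean and logvar.
    eps = tf.random.normal(shape=tf.shape(mean))
    return eps * tf.exp(logvar * 0.5) + mean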
Example #3
def create_seq_model(nodes=[],
                     weight_init="glorot_normal",
                     hidden_activation="relu",
                     opt_name="adam",
                     metric_list=["acc"],
                     output_activation="softmax",
                     loss_name="categorical_crossentropy"):
    # Create Sequential Model
    model = Sequential()

    if nodes:
        # Create Input Layer
        model.add(layers.InputLayer(input_shape=nodes[0], name="input"))

        # Create Hidden Layers
        for i in range(1, len(nodes) - 1):
            hidden_name = "hidden_{}".format(i)
            model.add(
                layers.Dense(units=nodes[i],
                             kernel_initializer=weight_init,
                             activation=hidden_activation,
                             name=hidden_name))

        # Create Output Layer
        model.add(
            layers.Dense(units=nodes[-1],
                         kernel_initializer=weight_init,
                         activation=output_activation,
                         name="output"))

        # Compile Neural Network
        model.compile(optimizer=opt_name, loss=loss_name, metrics=metric_list)

    # Return built model
    return model
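
The nodes list doubles as the whole architecture spec: its first element is the input shape, the middle elements are hidden-layer widths, and the last is the output width. A hypothetical call (values are illustrative only):

# 784-dim input, two hidden layers of 128 and 64 units, 10 output classes
model = create_seq_model(nodes=[(784,), 128, 64, 10])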
Example #4
 def create_model(self):
     self.model = tf.keras.Sequential([
         layers.InputLayer(input_shape=(self.num_of_frames,
                                        self.frame_size)),
         layers.Conv1D(128, kernel_size=3),
         layers.Conv1D(128, kernel_size=3),
         layers.ReLU(),
         layers.Dropout(.5),
         layers.MaxPooling1D(),
         layers.Conv1D(256, kernel_size=3),
         layers.ReLU(),
         layers.Dropout(.5),
         layers.MaxPooling1D(),
         layers.Conv1D(512, kernel_size=3),
         layers.ReLU(),
         layers.Dropout(.5),
         layers.MaxPooling1D(),
         layers.Flatten(),
         layers.Dense(512),
         layers.ReLU(),
         layers.Dropout(.5),
         layers.Dense(256),
         layers.ReLU(),
         layers.Dropout(.5),
         layers.Dense(self.num_of_classes, activation='softmax')
     ])
Example #5
    def get_compiled_model(self):
        model = models.Sequential([
            layers.InputLayer(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 1)),
            layers.Conv2D(16, (3, 3), activation='relu', padding='same'),
            layers.Conv2D(32, (3, 3), activation='relu', padding='same'),
            layers.MaxPool2D(2),
            layers.Conv2D(64, (3, 3), activation='relu', padding='same'),
            layers.Conv2D(128, (3, 3), activation='relu', padding='same'),
            layers.MaxPool2D(2),
            layers.Conv2D(256, (3, 3), activation='relu', padding='same'),
            layers.Conv2D(512, (3, 3), activation='relu', padding='same'),
            layers.MaxPool2D(2),
            layers.Conv2D(256, (3, 3), activation='relu', padding='same'),
            layers.UpSampling2D(2),
            layers.Conv2D(128, (3, 3), activation='relu', padding='same'),
            layers.UpSampling2D(2),
            layers.Conv2D(64, (3, 3), activation='relu', padding='same'),
            layers.UpSampling2D(2),
            layers.Conv2D(32, (3, 3), activation='relu', padding='same'),
            layers.Conv2D(16, (3, 3), activation='relu', padding='same'),
            layers.Conv2D(2, (3, 3), activation='relu', padding='same'),
        ])
        model.compile(
            optimizer=optimizers.Adam(),
            loss='mse',
            # run_eagerly=True,
            # metrics=['accuracy']
        )

        return model
Example #6
def define_model(nchan, L, Fs):
    model = tf.keras.Sequential()
    model.add(layers.InputLayer((L, nchan), batch_size=1))
    model.add(layers.LayerNormalization(axis=[1, 2], center=False,
                                        scale=False))
    model.add(
        MorletConvRaw([L, nchan],
                      Fs,
                      input_shape=[L, nchan, 1],
                      etas=etas,
                      wtime=wtime))
    model.add(
        layers.Conv2D(filters=filters,
                      kernel_size=[1, nchan],
                      activation='elu'))
    model.add(layers.Permute((3, 1, 2), name="second_permute"))
    model.add(
        layers.AveragePooling2D(pool_size=(1, 71),
                                strides=(1, 15),
                                name="pooling"))
    model.add(layers.Dropout(0.75))
    model.add(layers.Flatten())
    model.add(layers.Dense(3))
    model.add(layers.Activation('softmax'))
    model.compile(loss=losses.CategoricalCrossentropy(),
                  optimizer=optimizers.Adam(),
                  metrics=['accuracy'],
                  run_eagerly=False)
    return model
Example #7
def define_1DCNN(nchan, L, Fs):
    model = tf.keras.Sequential()
    model.add(layers.InputLayer((L, nchan), batch_size=None))
    model.add(layers.Conv1D(filters=30, kernel_size=64, padding="causal"))
    model.add(layers.LayerNormalization())
    model.add(layers.Activation('elu'))
    model.add(layers.AveragePooling1D(pool_size=2))
    model.add(layers.Dropout(0.2))
    model.add(layers.Conv1D(filters=15, kernel_size=32, padding="causal"))
    model.add(layers.LayerNormalization())
    model.add(layers.Activation('elu'))
    model.add(layers.AveragePooling1D(pool_size=2))
    model.add(layers.Dropout(0.3))
    model.add(layers.Conv1D(filters=10, kernel_size=16, padding="causal"))
    model.add(layers.LayerNormalization())
    model.add(layers.Activation('elu'))
    model.add(layers.AveragePooling1D(pool_size=2))
    model.add(layers.Dropout(0.4))
    model.add(layers.Flatten())
    model.add(layers.Dense(15, activation="tanh"))
    model.add(layers.LayerNormalization())
    model.add(layers.Dense(3))
    model.add(layers.Activation('softmax'))
    model.compile(loss=losses.CategoricalCrossentropy(),
                  optimizer=optimizers.Adam(),
                  metrics=['accuracy'],
                  run_eagerly=False)
    return model
Example #8
def cnn_network(opts):
    model = models.Sequential()
    model.add(
        layers.InputLayer(input_shape=(opts['siz'], opts['siz'], 1),
                          name='input'))
    model.add(
        layers.Conv2D(opts['F1N'], (opts['F1S'], opts['F1S']),
                      padding='same',
                      name='conv_1',
                      activation='relu'))
    model.add(layers.BatchNormalization(name='BN_1'))
    model.add(layers.MaxPooling2D(pool_size=2, strides=2))
    model.add(
        layers.Conv2D(opts['F2N'], (opts['F2S'], opts['F2S']),
                      strides=2,
                      padding='same',
                      name='conv_2',
                      activation='relu'))
    model.add(layers.BatchNormalization(name='BN_2'))
    model.add(layers.MaxPooling2D(pool_size=2, strides=2))
    model.add(
        layers.Conv2D(opts['F3N'], (opts['F3S'], opts['F3S']),
                      padding='same',
                      name='conv_3',
                      activation='relu'))
    model.add(layers.BatchNormalization(name='BN_3'))
    model.add(layers.Flatten())
    model.add(layers.Dense(64, activation='relu', name='dense_1'))
    model.add(layers.Dense(2, name='dense_2', activation='softmax'))
    model.compile(
        optimizer=tf.keras.optimizers.SGD(learning_rate=opts['lr'],
                                          momentum=0.9),
        # dense_2 already applies softmax, so the loss receives probabilities, not logits
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        metrics=['accuracy'])
    return model
Example #9
    def fit(self, X, y=None):
        kernel_size = self.kernel_size
        filters = self.filters
        optimizer = self.optimizer
        epochs = self.epochs

        # Define input shapes
        self.n_steps = X.shape[1]

        X = X.reshape((X.shape[0], X.shape[1], self.n_features))

        self.labels, ids = np.unique(y, return_inverse=True)
        y_cat = to_categorical(ids)
        num_classes = y_cat.shape[1]

        self.model = Sequential()
        self.model.add(layers.InputLayer(input_shape=(self.n_steps, self.n_features)))
        self.model.add(layers.Conv1D(100, 6, activation='relu'))
        self.model.add(layers.Conv1D(100, 6, activation='relu'))
        self.model.add(layers.MaxPooling1D(3))
        self.model.add(layers.Conv1D(160, 6, activation='relu'))
        self.model.add(layers.Conv1D(160, 6, activation='relu'))
        self.model.add(layers.GlobalAveragePooling1D(name='G_A_P_1D'))
        self.model.add(layers.Dropout(0.5))
        self.model.add(layers.Dense(num_classes))
        self.model.add(layers.Activation('softmax'))
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=optimizer,
                           metrics=["categorical_accuracy"])
        self.model.fit(X, y_cat, epochs=epochs, verbose=False)
Example #10
def get_model(bidirectional=False, seqModelType="SimpleRNN", RNNunits=32):
    model = keras.Sequential()
    model.add(layers.InputLayer(input_shape=(None, s)))

    if seqModelType == "HMM":
        seqLayer = HMMLayer(5, 15) # (10,15) is better than (5,11)
    elif seqModelType == "LSTM":
        seqLayer = layers.LSTM(RNNunits)
    elif seqModelType == "GRU":
        seqLayer = layers.GRU(RNNunits)
    elif seqModelType == "SimpleRNN":
        seqLayer = layers.SimpleRNN(RNNunits)
    else:
        sys.exit("unknown sequence model type " + seqModelType)

    if bidirectional:
        seqLayer = layers.Bidirectional(seqLayer)
    
    model.add(seqLayer)
    model.add(layers.Dense(1, activation='sigmoid'))
    lr = 1e-3
    #if seqModelType == "HMM":
    #    lr = 1e-2
    print(f"lr={lr}")
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
                  loss=tf.keras.losses.BinaryCrossentropy(), metrics=["accuracy"])
    return model
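
Because the sequence layer is selected by name and optionally wrapped in Bidirectional, typical calls look like this (hypothetical usage):

rnn_model = get_model(seqModelType="GRU", RNNunits=64)
bi_model = get_model(bidirectional=True, seqModelType="LSTM")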
Example #11
def print_model_architecture():
    input_layer = layers.InputLayer(input_shape=[FEATURES_COUNT, 32, 1],
                                    batch_size=BATCH_SIZE)

    model = create_model(input_layer)

    print(model.summary())
Example #12
def rnn(n_outputs, window_size):
    # 1-layer, basic model, doesn't work very well...

    m = Sequential()
    m.add(layers.InputLayer(input_shape=(window_size, 1)))

    m.add(
        layers.SimpleRNN(units=64,
                         activation='tanh',
                         use_bias=True,
                         kernel_initializer='glorot_uniform',
                         recurrent_initializer='orthogonal',
                         bias_initializer='zeros',
                         kernel_regularizer=None,
                         recurrent_regularizer=None,
                         bias_regularizer=None,
                         activity_regularizer=None,
                         kernel_constraint=None,
                         recurrent_constraint=None,
                         bias_constraint=None,
                         dropout=0.5,
                         recurrent_dropout=0.2,
                         return_sequences=False,
                         return_state=False,
                         go_backwards=False,
                         stateful=False,
                         unroll=False))
    m.add(layers.BatchNormalization(name='batch_norm_1'))

    # last layer

    m.add(layers.Dense(n_outputs, activation='sigmoid', name='output'))

    return m
Example #13
    def __init__(self, input_shape, nclass):
        super(AlexNet, self).__init__()
        self.inputlayer = k_layers.InputLayer(input_shape=input_shape, name='inputlayer')
        self.conv1 = k_layers.Conv2D(8, (3, 3), strides=(1, 1), name='conv1', padding='same', activation='relu')
        self.conv2 = k_layers.Conv2D(16, (3, 3), strides=(1, 1), name='conv2', padding='same', activation='relu')
        self.conv3 = k_layers.Conv2D(32, (3, 3), strides=(1, 1), name='conv3', padding='same', activation='relu')
        self.conv4 = k_layers.Conv2D(64, (3, 3), strides=(1, 1), name='conv4', padding='same', activation='relu')
        self.conv5 = k_layers.Conv2D(64, (3, 3), strides=(1, 1), name='conv5', padding='same', activation='relu')

        self.maxpool1 = k_layers.MaxPool2D(name='maxpool1', strides=(2, 2))
        self.maxpool2 = k_layers.MaxPool2D(name='maxpool2', strides=(2, 2))
        self.maxpool3 = k_layers.MaxPool2D(name='maxpool3', strides=(2, 2))
        self.maxpool4 = k_layers.MaxPool2D(name='maxpool4', strides=(2, 2))
        self.maxpool5 = k_layers.MaxPool2D(name='maxpool5', strides=(2, 2))

        self.bnlze1 = k_layers.BatchNormalization(name='bn1')
        self.bnlze2 = k_layers.BatchNormalization(name='bn2')
        self.bnlze3 = k_layers.BatchNormalization(name='bn3')
        self.bnlze4 = k_layers.BatchNormalization(name='bn4')
        self.bnlze5 = k_layers.BatchNormalization(name='bn5')

        self.flt = k_layers.Flatten()
        self.fc1 = k_layers.Dense(64, name='fc1', activation='relu')
        self.fc2 = k_layers.Dense(128, name='fc2', activation='relu')
        self.fc3 = k_layers.Dense(nclass, name='outlayer', activation='softmax')
Example #14
 def __init__(self, input_shape):
     self.model = models.Sequential([
         layers.InputLayer(input_shape=input_shape),
         layers.experimental.preprocessing.Rescaling(1. / 255),
         # layers.experimental.preprocessing.RandomFlip("horizontal_and_vertical"),
         # layers.experimental.preprocessing.RandomRotation(0.5),
         # layers.experimental.preprocessing.RandomContrast(0.5),
         # layers.experimental.preprocessing.RandomTranslation(height_factor=0.3, width_factor=0.3),
         # layers.experimental.preprocessing.RandomZoom(height_factor=0.3),
         # layers.experimental.preprocessing.RandomCrop(128,128),
     ])  # use preprocessing to make the dataset larger
     #128x128x3
     self.model.add(layers.Conv2D(8, 13, activation='relu'))
     self.model.add(layers.BatchNormalization())
     self.model.add(layers.Dropout(0.2))
     #116x116x8
     self.model.add(layers.AveragePooling2D(pool_size=2))
     #58x58x8
     self.model.add(layers.Conv2D(16, 11, activation='relu'))
     #48x48x16
     self.model.add(layers.AveragePooling2D(pool_size=2))
     #24x24x16
     self.model.add(layers.Flatten())
     self.model.add(layers.Dense(128, activation='relu'))
     self.model.add(layers.Dense(2, activation='softmax'))
     self.optimizer = optimizers.Adam(learning_rate=0.001)
     self.loss = losses.CategoricalCrossentropy()
     self.model.compile(loss=self.loss,
                        optimizer=self.optimizer,
                        metrics=['accuracy'])
Example #15
 def decoder_x(self):
     layers = [
         tfkl.InputLayer(input_shape=(self.latent_dim, )),
         # Expand the dimensions using a Dense layer and prepare for de-convolution
         tfkl.Dense(7 * 7 * 32, activation='relu'),
         # Reshape the flattened representation into a 3D tensor so that
         # de-convolution can be applied
         tfkl.Reshape(target_shape=(7, 7, 32)),
         tfkl.Conv2DTranspose(filters=64,
                              kernel_size=3,
                              strides=2,
                              padding='same',
                              activation='relu'),
         tfkl.Conv2DTranspose(filters=32,
                              kernel_size=3,
                              strides=2,
                              padding='same',
                              activation='relu'),
         # note that no activation is required in the final layer as we will
         # take care of that in the loss function.
         tfkl.Conv2DTranspose(filters=1,
                              kernel_size=3,
                              strides=1,
                              padding='same')
     ]
     return tfk.Sequential(layers)
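
As the comment notes, the final Conv2DTranspose emits logits; that only works if the loss applies the sigmoid itself. A minimal sketch of pairing such a decoder with a from-logits loss (variable names are illustrative):

# Let the loss apply the sigmoid: numerically more stable than a
# sigmoid output layer followed by plain binary cross-entropy.
bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
reconstruction_loss = bce(x_true, decoder_logits)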
Example #16
def CNN(shape, num_classes: int):
    """Returns a CNN model used in cot
    """
    model = models.Sequential()
    model.add(layers.InputLayer(input_shape=shape))
    model.add(layers.Conv2D(128, (3, 3), activation=lrelu, padding="valid"))
    model.add(layers.BatchNormalization())
    model.add(layers.Conv2D(128, (3, 3), activation=lrelu, padding="valid"))
    model.add(layers.BatchNormalization())
    model.add(layers.Conv2D(128, (3, 3), activation=lrelu, padding="valid"))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D((2, 2), strides=2))
    model.add(layers.Dropout(rate=0.25))

    model.add(layers.Conv2D(256, (3, 3), activation=lrelu, padding="valid"))
    model.add(layers.BatchNormalization())
    model.add(layers.Conv2D(256, (3, 3), activation=lrelu, padding="valid"))
    model.add(layers.BatchNormalization())
    model.add(layers.Conv2D(256, (3, 3), activation=lrelu, padding="valid"))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D((2, 2), strides=2))
    model.add(layers.Dropout(rate=0.25))

    model.add(layers.Conv2D(512, (3, 3), activation=lrelu, padding="valid"))
    model.add(layers.BatchNormalization())
    model.add(layers.Conv2D(256, (3, 3), activation=lrelu, padding="same"))
    model.add(layers.BatchNormalization())
    model.add(layers.Conv2D(128, (3, 3), activation=lrelu, padding="same"))
    model.add(layers.BatchNormalization())

    model.add(layers.AveragePooling2D(pool_size=1, padding="valid"))
    model.add(layers.Flatten())
    model.add(layers.Dense(128, activation=tf.nn.relu))
    model.add(layers.Dense(num_classes, activation=tf.nn.softmax))
    return model
Example #17
 def __init__(self):
     (self.train_X, self.train_y), (self.test_X,
                                    self.test_y) = mnist.load_data()
     self.image_input_layer = layers.InputLayer(input_shape=(28, 28, 1))
     self.convolution2d_layer = layers.Conv2D(
         20,
         use_bias=True,
         strides=(1, 1),
         activation='relu',
         padding='same',
         kernel_size=(5, 5),
         kernel_initializer=initializers.random_normal(mean=0,
                                                       stddev=0.01,
                                                       seed=None))
     self.max_pooling2d_layer = layers.MaxPooling2D(strides=(2, 2),
                                                    pool_size=(2, 2),
                                                    padding='valid')
     self.fully_connected_layer1 = layers.Dense(
         100,
         use_bias=True,
         activation='relu',
         kernel_initializer=initializers.random_normal(mean=0,
                                                       stddev=0.01,
                                                       seed=None))
     self.fully_connected_layer2 = layers.Dense(10,
                                                use_bias=True,
                                                activation='softmax')
     self.optimizer = SGD(momentum=0.9, learning_rate=0.001)
     self.history_list = list()
     self.score_list = list()
Example #18
def get_decoder():
    config = {
        "kernel_size": 3,
        "strides": 1,
        "padding": "same",
        "activation": "relu"
    }
    decoder = keras.Sequential([
        layers.InputLayer((None, None, 512)),
        layers.Conv2D(filters=512, **config),
        layers.UpSampling2D(),
        layers.Conv2D(filters=256, **config),
        layers.Conv2D(filters=256, **config),
        layers.Conv2D(filters=256, **config),
        layers.Conv2D(filters=256, **config),
        layers.UpSampling2D(),
        layers.Conv2D(filters=128, **config),
        layers.Conv2D(filters=128, **config),
        layers.UpSampling2D(),
        layers.Conv2D(filters=64, **config),
        layers.Conv2D(
            filters=3,
            kernel_size=3,
            strides=1,
            padding="same",
            activation="sigmoid",
        ),
    ])
    return decoder
Example #19
def build_model(train_ds):
    """
    Build the ML model. Sets up the desired layers and 
    compiles the tf.keres model

    :param train_ds: The training dataset used for nomalisation and determining the input_shape

    """

    # Get the input shape for the model
    for spectrogram, _ in train_ds.take(1):
        input_shape = spectrogram.shape[1:]
    print(f'Input shape: {input_shape}')

    # Normalisation Layer
    norm_layer = preprocessing.Normalization()
    norm_layer.adapt(train_ds.take(30).map(lambda x, _: x))

    # Model layout
    model = models.Sequential([
        layers.InputLayer(input_shape=input_shape),
        preprocessing.Resizing(32, 32),
        norm_layer,
        layers.Conv2D(60, 3, activation='relu'),
        #layers.Conv2D(30, 3, activation='relu'),
        #layers.Conv2D(30, 3, activation='relu'),
        #layers.Conv2D(30, 3, activation='relu'),
        layers.Flatten(),
        layers.Dense(30, activation='relu'),
        #layers.Dropout(0.1),
        layers.Dense(2),
    ])
    return model
Example #20
def target_model_fn():
    """The architecture of the target (victim) model.
    The attack is white-box, hence the attacker is assumed to know this architecture too."""

    model = tf.keras.models.Sequential()

    model.add(layers.InputLayer(input_shape=(WIDTH, HEIGHT, CHANNELS)))

    model.add(layers.Conv2D(32, (3, 3), activation="relu", padding="same"))
    model.add(layers.Conv2D(32, (3, 3), activation="relu"))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Dropout(0.25))

    model.add(layers.Conv2D(64, (3, 3), activation="relu", padding="same"))
    model.add(layers.Conv2D(64, (3, 3), activation="relu"))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Dropout(0.25))

    model.add(layers.Flatten())

    model.add(layers.Dense(512, activation="relu"))
    model.add(layers.Dropout(0.5))

    model.add(layers.Dense(NUM_CLASSES, activation="softmax"))
    model.compile("adam",
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])

    return model
Example #21
    def __init__(self):
        super().__init__()

        # stores the current probability of an image being augmented
        self.probability = tf.Variable(0.0)

        # the corresponding augmentation names from the paper are shown above each layer
        # the authors show (see Figure 4) that the blitting and geometric augmentations
        # are the most helpful in the low-data regime
        self.augmenter = keras.Sequential(
            [
                layers.InputLayer(input_shape=(image_size, image_size, 3)),
                # blitting/x-flip:
                layers.RandomFlip("horizontal"),
                # blitting/integer translation:
                layers.RandomTranslation(
                    height_factor=max_translation,
                    width_factor=max_translation,
                    interpolation="nearest",
                ),
                # geometric/rotation:
                layers.RandomRotation(factor=max_rotation),
                # geometric/isotropic and anisotropic scaling:
                layers.RandomZoom(height_factor=(-max_zoom, 0.0),
                                  width_factor=(-max_zoom, 0.0)),
            ],
            name="adaptive_augmenter",
        )
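
What the snippet sets up but does not show is how self.probability gates the pipeline: each image is replaced by its augmented version only with the current probability. A sketch of that gating under the same setup (illustrative, not the exact source):

    def call(self, images, training=True):
        if training:
            augmented = self.augmenter(images, training=True)
            # Per-image coin flip: keep the original or the augmented version.
            batch_size = tf.shape(images)[0]
            keep = tf.random.uniform((batch_size, 1, 1, 1)) < self.probability
            images = tf.where(keep, augmented, images)
        return images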
Example #22
 def __init__(self, n_stages=4, filters=256, kernel_size=3):
     super().__init__()
     self.filters = filters
     self.kernel_size = kernel_size
     bn_axis = 3
     # build model network
     self.layer_input = layers.InputLayer(input_shape=(15, 15, 3),
                                          name='input',
                                          dtype='float16')
     self.layer_conv0 = layers.Conv2D(self.filters,
                                      self.kernel_size,
                                      padding='same')
     self.layer_batch0 = layers.BatchNormalization(axis=bn_axis)
     self.layer_activ0 = layers.Activation('relu')
     # a list of resnet blocks
     self.layer_resBlocks = [
         ResNetBlock(filters=self.filters, kernel_size=self.kernel_size)
         for _ in range(n_stages)
     ]
     # final evaluation head
     self.layer_final_conv = layers.Conv2D(1, (1, 1))
     self.layer_final_batch = layers.BatchNormalization(axis=bn_axis)
     self.layer_final_activ = layers.Activation('relu')
     self.layer_flatten = layers.Flatten()
     self.layer_dense = layers.Dense(256, activation='relu')
     self.layer_res = layers.Dense(1, activation='tanh', name='result')
Example #23
    def build_network(self):
        c = self.config.cnn

        model = keras.Sequential()
        model.add(
            layers.InputLayer(input_shape=self.env.observation_space.shape))

        # Scale the input values to be between 0 and 1
        max_input = np.max(self.env.observation_space.high)
        model.add(
            layers.Lambda(lambda obs: tf.cast(obs, np.float32) / max_input))

        # Create the conv layers
        for filters, kernel_size, strides in zip(c.filters, c.kernel_size,
                                                 c.strides):
            layer = layers.Conv2D(filters,
                                  kernel_size,
                                  strides,
                                  activation='relu')
            model.add(layer)

        model.add(layers.Flatten())
        model.add(layers.Dense(c.fc_size, activation='relu'))
        model.add(layers.Dense(self.n_actions))

        return model
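
The conv stack is driven entirely by three parallel lists in the config, which must therefore have equal length. A hypothetical DQN-style config (illustrative values only):

from types import SimpleNamespace

# Classic Atari stack: 32 8x8/4, 64 4x4/2, 64 3x3/1 convolutions.
cnn_config = SimpleNamespace(filters=[32, 64, 64],
                             kernel_size=[8, 4, 3],
                             strides=[4, 2, 1],
                             fc_size=512)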
Example #24
 def __init__(self, config):
     super(Decoder, self).__init__()
     self.config = config
     self.dec = Sequential([
         layers.InputLayer(input_shape=(self.config.latent_dim, )),
         layers.Dense(1024),
         layers.ReLU(),
         layers.Dense(4 * 4 * 64),
         layers.ReLU(),
         layers.Reshape((4, 4, 64)),
         layers.Conv2DTranspose(filters=64,
                                kernel_size=4,
                                strides=2,
                                padding='same'),
         layers.ReLU(),
         layers.Conv2DTranspose(filters=32,
                                kernel_size=4,
                                strides=2,
                                padding='same'),
         layers.ReLU(),
         layers.Conv2DTranspose(filters=1,
                                kernel_size=4,
                                strides=2,
                                padding='same'),
     ])
Example #25
    def __init__(self, name="kid", **kwargs):
        super().__init__(name=name, **kwargs)

        # KID is estimated per batch and is averaged across batches
        self.kid_tracker = keras.metrics.Mean()

        # a pretrained InceptionV3 is used without its classification layer
        # transform the pixel values to the 0-255 range, then use the same
        # preprocessing as during pretraining
        self.encoder = keras.Sequential(
            [
                layers.InputLayer(input_shape=(image_size, image_size, 3)),
                layers.Rescaling(255.0),
                layers.Resizing(height=kid_image_size, width=kid_image_size),
                layers.Lambda(
                    keras.applications.inception_v3.preprocess_input),
                keras.applications.InceptionV3(
                    include_top=False,
                    input_shape=(kid_image_size, kid_image_size, 3),
                    weights="imagenet",
                ),
                layers.GlobalAveragePooling2D(),
            ],
            name="inception_encoder",
        )
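
The comment says KID is estimated per batch: concretely, real and generated features from this encoder are compared with a cubic polynomial kernel inside an MMD estimate. A sketch of that kernel, assuming feature matrices of shape (batch, d) (the function name is illustrative):

def polynomial_kernel(features_1, features_2):
    # k(x, y) = (x . y / d + 1)^3, the kernel used by
    # Kernel Inception Distance (Binkowski et al., 2018).
    feature_dim = tf.cast(tf.shape(features_1)[1], tf.float32)
    return (features_1 @ tf.transpose(features_2) / feature_dim + 1.0) ** 3.0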
Example #26
    def getGenerator(self, CODE_SIZE):

        generator = Sequential()
        generator.add(L.InputLayer([CODE_SIZE], name='gen_input_layer'))
        generator.add(L.Dense(10*16*16, activation='elu', name='gen_Dense1'))

        generator.add(L.Reshape((16, 16, 10), name='gen_reshape'))
        generator.add(L.Conv2DTranspose(16, kernel_size=(9, 9), activation='elu', name='gen_Conv9_1'))
        generator.add(L.Conv2DTranspose(16, kernel_size=(9, 9), activation='elu', name='gen_Conv9_2'))
        generator.add(L.Conv2DTranspose(16, kernel_size=(9, 9), activation='elu', name='gen_Conv9_3'))
        generator.add(L.Conv2DTranspose(16, kernel_size=(9, 9), activation='elu', name='gen_Conv9_4'))
        generator.add(L.UpSampling2D(size=(2, 2), name='gen_upsample1'))

        generator.add(L.Conv2DTranspose(16, kernel_size=(7, 7), activation='elu', name='gen_Conv7_1'))
        generator.add(L.Conv2DTranspose(16, kernel_size=(7, 7), activation='elu', name='gen_Conv7_2'))
        generator.add(L.UpSampling2D(size=(2, 2), name='gen_upsample2'))

        generator.add(L.Conv2DTranspose(8, kernel_size=(5, 5), activation='elu', name='gen_Conv5_1'))
        generator.add(L.Conv2DTranspose(8, kernel_size=(5, 5), activation='elu', name='gen_Conv5_2'))
        generator.add(L.UpSampling2D(size=(2, 2), name='gen_upsample3'))
        generator.add(L.Conv2DTranspose(8, kernel_size=(3, 3), activation='elu', name='gen_Conv3_1'))
        generator.add(L.Conv2DTranspose(8, kernel_size=(3, 3), activation='elu', name='gen_Conv3_2'))

        generator.add(L.Conv2D(8, kernel_size=3, activation='relu', name='gen_Conv3_3'))
        generator.add(L.Conv2D(3, kernel_size=3, activation='tanh', name='gen_Conv3_4'))

        return generator
Example #27
 def define_the_model(self):
     # define the autoencoder
     self.model = tf.keras.models.Sequential([
         layers.InputLayer(input_shape=(self.data_set.network_input_size[0], self.data_set.network_input_size[1], 1)),
         ...
     ])
     # display the model architecture
     self.model.summary()
Example #28
 def build_model(self):
     model = tf.keras.Sequential()
     model.add(layers.InputLayer(input_shape=self.env.observation_space.shape))
     model.add(layers.Dense(24, activation="relu"))
     model.add(layers.Dense(24, activation="relu"))
     model.add(layers.Dense(self.env.action_space.n, activation="linear"))
     model.compile(optimizer=keras.optimizers.SGD(learning_rate=self.step_size, momentum=0.5), loss="mse")
     return model
Example #29
def classify_model(x):
    model = Sequential()
    model.add(layers.InputLayer(input_shape=x.shape[1:]))
    model.add(layers.Dense(128, activation="relu"))
    model.add(layers.Dense(128, activation="relu"))
    model.add(layers.Dense(5, activation="softmax"))

    return model
Example #30
 def _build_layers_v2(self, input_dict, num_outputs, options):
     self.model = Sequential()
     self.model.add(layers.InputLayer(input_tensor=input_dict["obs"], input_shape=(4,)))
     self.model.add(layers.Dense(4, name='l1', activation='relu'))
     self.model.add(layers.Dense(10, name='l2', activation='relu'))
     self.model.add(layers.Dense(10, name='l3', activation='relu'))
     self.model.add(layers.Dense(10, name='l4', activation='relu'))
     self.model.add(layers.Dense(2, name='l5', activation='relu'))
     return self.model.get_layer("l5").output, self.model.get_layer("l4").output