Example #1
def make_generator(num_channels):
    """
    The model for the generator network is defined here. 
    """
    # instantiate model
    model = Sequential()

    # starts with shape (z_dim * 4 * 4) random input
    # add noise
    model.add(GaussianNoise(stddev=1, input_shape=(4, 4, z_dim)))  # z_dim assumed defined at module level
    # do ADAin with vector in W space (from mapping net)
    model.add(ADAin())
    # 2x upsample with a transposed 3x3 conv
    model.add(Conv2DTranspose(num_channels // 8, (3, 3), strides=(2, 2), padding='same', use_bias=False))
    # add noise
    model.add(GaussianNoise(stddev=1))
    # do ADAin with vector in W space (from mapping net)
    model.add(ADAin())

    # do upsampling
    # add noise
    model.add(GaussianNoise(stddev=1))
    # do ADAin with vector in W space (from mapping net)
    model.add(ADAin())
    # 2x upsample with a transposed 3x3 conv
    model.add(Conv2DTranspose(num_channels // 4, (3, 3), strides=(2, 2), padding='same', use_bias=False))
    # add noise
    model.add(GaussianNoise(stddev=1))
    # do ADAin with vector in W space (from mapping net)
    model.add(ADAin())

    # continue...

    return model
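# `ADAin` above is not a Keras built-in, and the Sequential API as written cannot
# feed a style vector from the mapping network into it. A minimal sketch of
# adaptive instance normalization (AdaIN) as a custom functional-API layer;
# the names and details here are assumptions, not the original implementation:
import tensorflow as tf
from tensorflow.keras.layers import Layer, Dense

class AdaIN(Layer):
    """Normalize features per channel, then scale/shift them with an affine
    transform predicted from a style vector w."""
    def build(self, input_shape):
        channels = input_shape[0][-1]
        # one affine projection from w to per-channel (scale, bias)
        self.affine = Dense(2 * channels)

    def call(self, inputs):
        x, w = inputs                                  # x: (B, H, W, C), w: (B, D)
        mean, var = tf.nn.moments(x, axes=[1, 2], keepdims=True)
        x = (x - mean) / tf.sqrt(var + 1e-8)
        scale, bias = tf.split(self.affine(w), 2, axis=-1)
        return x * (1.0 + scale[:, None, None, :]) + bias[:, None, None, :]

# usage in a functional model: x = AdaIN()([x, w])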
Example #2
    def build_model(self, n_features, n_labels):
        """
        The method builds a new member of the ensemble and returns it.

        :type n_features: int
        :param n_features: The number of features.

        :type n_labels: int
        :param n_labels: The number of labels.
        """

        # initialize optimizer and early stopping
        self.optimizer = Adam(lr=self.hyperparameters['lr'], beta_1=0.9, beta_2=0.999, epsilon=None, decay=0., amsgrad=False)
        self.es = EarlyStopping(monitor='val_mean_squared_error', min_delta=0.0, patience=self.patience,
                                verbose=1, mode='min', restore_best_weights=True)

        inputs = Input(shape=(n_features,))
        h = GaussianNoise(self.hyperparameters['noise'])(inputs)

        # the encoder part
        for i in range(self.bottelneck_layer):
            h = Dense(self.hyperparameters['neurons'][i], activation='relu',
                      kernel_regularizer=regularizers.l1_l2(self.hyperparameters['l1_hidden'],
                                                            self.hyperparameters['l2_hidden']))(h)
            h = Dropout(self.hyperparameters['dropout'])(h)

        latent = Dense(self.hyperparameters['neurons'][self.bottelneck_layer], activation='linear',
                      kernel_regularizer=regularizers.l1_l2(self.hyperparameters['l1_hidden'],
                                                            self.hyperparameters['l2_hidden']))(h)

        encoder = Model(inputs=inputs, outputs=latent, name='encoder')

        # the decoder part
        latent_inputs = Input(shape=(self.hyperparameters['neurons'][self.bottelneck_layer],))
        h = GaussianNoise(0.0)(latent_inputs)

        for i in range(self.bottelneck_layer + 1, self.n_hidden_layers - 1):
            h = Dense(self.hyperparameters['neurons'][i], activation='relu',
                      kernel_regularizer=regularizers.l1_l2(self.hyperparameters['l1_hidden'],
                                                            self.hyperparameters['l2_hidden']))(h)
            h = Dropout(self.hyperparameters['dropout'])(h)

        decoded = Dense(n_labels, activation='linear',
                        kernel_regularizer=regularizers.l1_l2(self.hyperparameters['l1_out'],
                                                              self.hyperparameters['l2_out']))(h)

        decoder = Model(inputs=latent_inputs, outputs=decoded, name='decoder')

        # encoder-decoder model
        encoder_decoder = Model(inputs, decoder(encoder(inputs)), name='encoder_decoder')
        return encoder_decoder, encoder, decoder
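# A hedged usage sketch for the method above; the data names, epochs, and batch
# size are assumptions, not from the source:
encoder_decoder, encoder, decoder = self.build_model(n_features=X_train.shape[1],
                                                     n_labels=y_train.shape[1])
# 'mean_squared_error' must be in metrics so 'val_mean_squared_error' exists
# for the early-stopping callback built in build_model
encoder_decoder.compile(optimizer=self.optimizer, loss='mse',
                        metrics=['mean_squared_error'])
encoder_decoder.fit(X_train, y_train,
                    validation_data=(X_val, y_val),
                    epochs=100, batch_size=32,
                    callbacks=[self.es])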
Example #3
def loader(input_shape, num_outputs, output_activation="log_softmax", use_attention=False, use_conv2d=False, use_lstm=False):
    inputs = Input(shape=input_shape, name="input")
    x = inputs
    x = GaussianNoise(stddev=0.01, name="input_noise")(x)
    x = Dropout(rate=0.4, noise_shape=(None, 1, input_shape[1]), name="channel_dropout")(x)
    if use_conv2d:
        x = Reshape((input_shape[0] or -1, input_shape[1], 1), name="reshape_to_image")(x)
        x = Conv2D(128, (3, 9), (1, 6), activation=None, padding="same", name="conv2d_1")(x)
        x = BatchNormalization(name="conv2d_1_bn")(x)
        x = Activation("relu", name="conv2d_1_relu")(x)
        x = Conv2D(256, (3, 9), (1, 6), activation=None, padding="same", name="conv2d_2")(x)
        x = BatchNormalization(name="conv2d_2_bn")(x)
        x = Activation("relu", name="conv2d_2_relu")(x)
        # x = Reshape((x.shape[1] or -1, x.shape[2] * x.shape[3]), name="flatten_image_channels")(x)
        x = tf.math.reduce_max(x, axis=2, name="maxpool_image_channels")
    x = FrameLayer(512, 5, 1, name="frame1")(x)
    x = FrameLayer(512, 3, 2, name="frame2")(x)
    x = FrameLayer(512, 3, 3, name="frame3")(x)
    if use_lstm:
        x = LSTM(512, name="lstm", return_sequences=True)(x)
    x = FrameLayer(512, 1, 1, name="frame4")(x)
    x = FrameLayer(1500, 1, 1, name="frame5")(x)
    if use_attention:
        x = frequency_attention(x, d_f=60)
    x = GlobalMeanStddevPooling1D(name="stats_pooling")(x)
    x = SegmentLayer(512, name="segment1")(x)
    x = SegmentLayer(512, name="segment2")(x)
    outputs = Dense(num_outputs, name="output", activation=None)(x)
    if output_activation:
        outputs = Activation(getattr(tf.nn, output_activation), name=str(output_activation))(outputs)
    return Model(inputs=inputs, outputs=outputs, name="CLSTM")
Example #4
def loader(input_shape,
           num_outputs,
           core="resnet50_v2",
           output_activation="softmax"):
    # Normalize and regularize by adding Gaussian noise to input (during training only)
    inputs = Input(shape=input_shape, name="input")
    x = inputs
    x = GaussianNoise(stddev=0.01, name="input_noise")(x)
    x = Dropout(0.2,
                noise_shape=(None, 1, input_shape[1]),
                name="channel_dropout")(x)
    x = Reshape((input_shape[0] or -1, input_shape[1], 1),
                name="reshape_to_image")(x)
    # Connect untrained Resnet50 or MobileNet architecture without inputs and outputs
    if core == "mobilenet_v2":
        convnet = MobileNetV2(include_top=False, weights=None, input_tensor=x)
    elif core == "resnet50_v2":
        convnet = ResNet50V2(include_top=False, weights=None, input_tensor=x)
    # Embedding layer with timesteps
    rows, cols, channels = convnet.output.shape[1:]
    x = Reshape((rows or -1, cols * channels),
                name="flatten_channels")(convnet.output)
    x = Dense(128, activation="sigmoid", name="embedding")(x)
    x = BatchNormalization(name="embedding_bn")(x)
    # Pooling and output
    x = GlobalAveragePooling1D(name="timesteps_pooling")(x)
    outputs = Dense(num_outputs, activation=None, name="output")(x)
    if output_activation:
        outputs = Activation(getattr(tf.nn, output_activation),
                             name=str(output_activation))(outputs)
    return Model(inputs=inputs,
                 outputs=outputs,
                 name="{}_extractor".format(core))
Example #5
    def call(self, inputs, training=None):
        if training is None:
            training = K.learning_phase()

        masked_inputs = inputs
        if training:
            if self.noise_std > 0:
                masked_inputs = GaussianNoise(self.noise_std)(masked_inputs)

            if self.swap_prob > 0:
                masked_inputs = SwapNoiseMasker(
                    probs=[self.swap_prob] * self.input_dim,
                    seed=[self.seed] * 2)(masked_inputs)

            if self.mask_prob > 0:
                masked_inputs = ZeroNoiseMasker(
                    probs=[self.mask_prob] * self.input_dim,
                    seed=[self.seed] * 2)(masked_inputs)

        encoded = self.encoder(masked_inputs)
        decoded = self.decoder(encoded)

        rec_loss = K.mean(mean_squared_error(inputs, decoded))
        self.add_loss(rec_loss)

        return encoded, decoded
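# GaussianNoise, like Dropout, is only active while the training flag is set,
# which is why the maskers above are gated on `training`. A quick check,
# assuming TensorFlow 2.x:
import tensorflow as tf
from tensorflow.keras.layers import GaussianNoise

x = tf.ones((1, 4))
layer = GaussianNoise(stddev=0.5)
print(layer(x, training=False))  # identity: [[1. 1. 1. 1.]]
print(layer(x, training=True))   # each entry perturbed by N(0, 0.5^2) noise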
Example #6
    def _createC51Model(self):

        #print(self.state_size)
        state_input = Input(shape=(self.state_size, ))

        if self.noisy_net:
            distr = Dense(512)(state_input)
            distr = Dense(self.action_size)(distr)  # chain from the 512-unit layer
            distr = GaussianNoise(1)(distr)
            distr = Activation('relu')(distr)
        else:
            distr = Dense(512, activation='relu')(state_input)

        distribution_list = []
        for i in range(self.action_size):
            distribution_list.append(
                Dense(self.num_atoms, activation='softmax')(distr))

        # the newer Keras API uses the `inputs`/`outputs` keyword arguments
        model = Model(inputs=state_input, outputs=distribution_list)

        opt = Adam(lr=self.network_learning_rate)
        model.compile(loss='categorical_crossentropy', optimizer=opt)

        return model
Example #7
    def _createModel(self):

        input = Input(shape=(self.state_size, ))
        advt = Dense(units=256, activation='relu')(input)
        if self.noisy_net:
            # Add Gaussian noise to output layer!
            advt = Dense(self.action_size)(advt)
            advt = GaussianNoise(1)(advt)
            #advt= Activation('linear') # TODO Check if this is correct
        else:
            advt = Dense(units=self.action_size)(advt)

        #Further layer to estimate state values V(s)
        value = Dense(units=256, activation='relu')(input)
        value = Dense(1)(value)

        advt = Lambda(lambda advt: advt - tf.reduce_mean(
            input_tensor=advt, axis=-1, keepdims=True))(advt)

        final = Add()([value, advt])
        model = Model(inputs=input, outputs=final)

        opt = Adam(lr=self.network_learning_rate)

        #model.compile(loss='mse', optimizer=opt)
        model.compile(loss=huber_loss, optimizer=opt)  # custom Huber loss defined elsewhere

        #model.save_weights('model_DQN.h5')
        return model
Example #8
    def _createModel(self):
        model = Sequential()

        model.add(
            Dense(units=256,
                  activation='relu',
                  input_dim=self.state_size,
                  kernel_initializer='RandomUniform'))
        #Further hidden layer
        model.add(
            Dense(units=256,
                  activation='relu',
                  kernel_initializer='RandomUniform'))

        # Add Gaussian noise to output layer!
        model.add(Dense(self.action_size))
        model.add(GaussianNoise(1))
        model.add(Activation('linear'))

        #model.add(Dense(units=self.action_size, activation='linear', kernel_initializer='RandomUniform'))

        # Currently the Adam optimizer is the standard optimizer due to better performance than SGD
        opt = Adam(lr=self.network_learning_rate)
        #model.compile(loss='mse', optimizer=opt)
        model.compile(loss=huber_loss, optimizer=opt)  # custom Huber loss defined elsewhere
        return model
Example #9
    def build_neural_network(self, inputs, outputs):
        """
        Create Keras neural network model and compile it.

        Args:
            inputs (int): Number of input predictor variables
            outputs (int): Number of output predictor variables
        """
        nn_input = Input(shape=(inputs, ), name="input")
        nn_model = nn_input
        for h in range(self.hidden_layers):
            nn_model = Dense(self.hidden_neurons,
                             activation=self.activation,
                             kernel_regularizer=l2(self.l2_weight),
                             name=f"dense_{h:02d}")(nn_model)
            if self.use_dropout:
                nn_model = Dropout(self.dropout_alpha,
                                   name=f"dropout_h_{h:02d}")(nn_model)
            if self.use_noise:
                nn_model = GaussianNoise(self.noise_sd,
                                         name=f"ganoise_h_{h:02d}")(nn_model)
        nn_model = Dense(outputs,
                         activation=self.output_activation,
                         name=f"dense_{self.hidden_layers:02d}")(nn_model)
        self.model = Model(nn_input, nn_model)
        if self.optimizer == "adam":
            self.optimizer_obj = Adam(lr=self.lr,
                                      beta_1=self.adam_beta_1,
                                      beta_2=self.adam_beta_2,
                                      decay=self.decay)
        elif self.optimizer == "sgd":
            self.optimizer_obj = SGD(lr=self.lr,
                                     momentum=self.sgd_momentum,
                                     decay=self.decay)
        self.model.compile(optimizer=self.optimizer_obj, loss=self.loss)
Example #10
    def discriminator(self):

        model = Sequential()

        # add Gaussian noise to prevent Discriminator overfitting
        model.add(GaussianNoise(0.2, input_shape=self.img_shape))

        model.add(Conv2D(self.outputFilter, kernel_size=self.kernel_size, strides=2, input_shape=self.img_shape, padding="same"))  # 256 -> 128
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))

        # 128 -> 64 -> 32 -> 16 -> 8
        for i in range(self.upSamplingLayer):
            # print(self.outputFilter * (2 ** i))
            model.add(Conv2D(self.outputFilter * (2 ** (i+1)), kernel_size=self.kernel_size, strides=2, padding="same"))  # 128 -> 64
            # model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
            model.add(LeakyReLU(alpha=0.2))
            model.add(Dropout(0.25))
            model.add(BatchNormalization(momentum=0.8))

        model.add(Flatten())
        model.add(Dense(1, activation='sigmoid'))

        model.compile(loss='binary_crossentropy', optimizer=Adam(lr=self.d_lr, beta_1=self.d_beta_1), metrics=['accuracy'])

        # print("\nDiscriminator")
        # model.summary()

        return model
Example #11
def create_model(input_dim):

    # optimised network shape
    DENSE = 128
    DROPOUT = 0.5
    C1_K = 8    # number of kernels/feature extractors for the first layer
    C1_S = 32   # width of the convolutional mini networks
    C2_K = 16
    C2_S = 32

    # activation function
    leaky_relu = keras.layers.LeakyReLU(alpha=0.2)
    activation = leaky_relu
    kernel_initializer = "he_normal"

    model = keras.models.Sequential()

    model.add(GaussianNoise(0.05, input_shape=(input_dim,)))
    model.add(Reshape((input_dim, 1)))
    model.add(SeparableConv1D(C1_K, C1_S, activation=activation, padding="same",
                              kernel_initializer=kernel_initializer, use_bias=False,
                              kernel_constraint=keras.constraints.max_norm(1.)))
    model.add(keras.layers.MaxPooling1D(pool_size=2))
    model.add(SeparableConv1D(C2_K, C2_S, activation=activation, padding="same",
                              kernel_initializer=kernel_initializer, use_bias=False,
                              kernel_constraint=keras.constraints.max_norm(1.)))
    model.add(keras.layers.MaxPooling1D(pool_size=2))
    model.add(Flatten())
    model.add(MCDropout(DROPOUT))
    model.add(Dense(DENSE, activation=activation, kernel_constraint=keras.constraints.max_norm(1.)))
    model.add(MCDropout(DROPOUT))
    model.add(Dense(1, activation='linear', kernel_constraint=keras.constraints.max_norm(1.), use_bias=False))

    # sometimes the model needs to be compiled outside of this function
    model.compile(loss=HuberLoss(), optimizer=keras.optimizers.Nadam(lr=0.001, beta_1=0.9, beta_2=0.999))
    return model
Example #12
    def _build_discriminator_latent(self,
                                    latent_dim,
                                    layers=16,
                                    width=16,
                                    hidden_activation='relu',
                                    init=RandomNormal(mean=0, stddev=0.02),
                                    add_noise=True):
        """Build a model that classifies latent vectors as real or fake."""
        input_layer = Input((latent_dim,))
        F = input_layer
        if add_noise:
            F = GaussianNoise(0.01)(F)
        for i in range(layers):
            X = Dense(width)(F)
            if add_noise:
                X = GaussianDropout(0.005)(X)
            X = LayerNormalization()(X)
            if hidden_activation == 'leaky_relu':
                X = LeakyReLU(0.02)(X)
            else:
                X = Activation(hidden_activation)(X)
            F = Concatenate()([F, X])
        X = Dense(128)(F)
        if hidden_activation == 'leaky_relu':
            X = LeakyReLU(0.02)(X)
        else:
            X = Activation(hidden_activation)(X)
        X = Dense(1)(X)
        output_layer = Activation('sigmoid')(X)
        model = Model(input_layer, output_layer)
        model.compile(Adam(clipnorm=1, lr=self.parameters["lr"]["gan_discriminator"],
                           beta_1=0.5),
                      loss=self.parameters["loss"]["adversarial"])
        return model
Example #13
    def call(self, inputs, training=None):
        if training is None:
            training = K.learning_phase()

        masked_inputs = inputs
        if training:
            if self.noise_std > 0:
                masked_inputs = GaussianNoise(self.noise_std)(masked_inputs)

            if self.swap_prob > 0:
                masked_inputs = SwapNoiseMasker(probs=[self.swap_prob] * self.input_dim,
                                                seed=[self.seed] * 2)(masked_inputs)

            if self.mask_prob > 0:
                masked_inputs = ZeroNoiseMasker(probs=[self.mask_prob] * self.input_dim,
                                                seed=[self.seed] * 2)(masked_inputs)

        x = masked_inputs
        encoded_list = []
        for encoder in self.encoders:
            x = encoder(x)
            encoded_list.append(x)

        encoded = Concatenate()(encoded_list) if len(encoded_list) > 1 else encoded_list[0]
        decoded = self.decoder(encoded)

        rec_loss = K.mean(mean_squared_error(inputs, decoded))
        self.add_loss(rec_loss)

        return encoded, decoded
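# SwapNoiseMasker and ZeroNoiseMasker are custom layers from the source
# project, not Keras built-ins. A minimal sketch of the swap-noise idea (with
# probability probs[j], replace column j with the value from a random row of
# the same batch); this is an assumption about the behavior, not the original
# implementation:
import tensorflow as tf
from tensorflow.keras.layers import Layer

class SwapNoise(Layer):
    def __init__(self, probs, **kwargs):
        super().__init__(**kwargs)
        self.probs = tf.constant(probs, dtype=tf.float32)

    def call(self, x, training=None):
        if not training:
            return x
        batch = tf.shape(x)[0]
        # pick one random source row per input row, then swap the masked columns
        idx = tf.random.uniform((batch,), maxval=batch, dtype=tf.int32)
        swapped = tf.gather(x, idx)
        mask = tf.cast(tf.random.uniform(tf.shape(x)) < self.probs, x.dtype)
        return x * (1.0 - mask) + swapped * mask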
Example #14
    def Unet3D(self):
        img_input = Input(shape=self.input_shape)

        d0 = GaussianNoise(self.noise)(img_input)
        d1 = self._conv3d(d0, self.base_filter, se_block=False, se_ratio=self.se_ratio, downsizing=False, loop=1)
        d2 = self._conv3d(d1, self.base_filter*2, se_ratio=self.se_ratio)
        d3 = self._conv3d(d2, self.base_filter*4, se_ratio=self.se_ratio)
        d4 = self._conv3d(d3, self.base_filter*8, se_ratio=self.se_ratio)

        if self.depth == 4:
            d5 = self._conv3d(d4, self.base_filter*16, se_ratio=self.se_ratio)

            u4 = self._upconv3d(d5, d4, self.base_filter*8, se_ratio=self.se_ratio)
            u3 = self._upconv3d(u4, d3, self.base_filter*4, se_ratio=self.se_ratio)
        elif self.depth == 3:
            u3 = self._upconv3d(d4, d3, self.base_filter*4, se_ratio=self.se_ratio)
        else:
            raise ValueError(f'Depth must be 3 or 4, got {self.depth}')

        u2 = self._upconv3d(u3, d2, self.base_filter*2, se_ratio=self.se_ratio)
        u1 = self._upconv3d(u2, d1, self.base_filter, se_block=False, se_ratio=self.se_ratio, loop=1)

        if self.classes == 1:
            img_output = Conv3D(self.classes, (1, 1, 1), strides=(1, 1, 1), padding='same', activation='sigmoid')(u1)
        else:
            img_output = Conv3D(self.classes, (1, 1, 1), strides=(1, 1, 1), padding='same', activation='softmax')(u1)

        model = Model(img_input, img_output, name=self.model)

        return model
Example #15
    def __init__(self, hidden_units, act_fn=default_activation, output_shape=1, out_activation=None, out_layer=True):
        """
        Interleave GaussianNoise layers with Dense layers; the added noise is
        independent of the original input.
        Args:
            hidden_units: list of layer widths, e.g. [32, 32]
            output_shape: units of the last layer
            out_activation: activation function of the last layer
            out_layer: whether to append a final output layer
        """
        super().__init__()
        for u in hidden_units:
            self.add(GaussianNoise(0.4))
            self.add(Dense(u, act_fn))
        if out_layer:
            self.add(GaussianNoise(0.4))
            self.add(Dense(output_shape, out_activation))
Example #16
def get_model(noise=False, nn_scale=8, lookback=4):
    """
    :param noise: Standard deviation of Gaussian noise that adds to the input. Default - False, no noise added
    :param nn_scale: multiplier that defines number of convolutional filters 4,8,16 works best
    :param lookback: number of month of data used for input
    :return:
    """
    sample_shape = (30, 64, 2 * lookback)  # lat, lon, channel
    model = Sequential()
    model.add(InputLayer(input_shape=sample_shape))
    if noise:
        model.add(GaussianNoise(stddev=noise))
    model.add(Conv2D(64 * nn_scale, kernel_size=(4, 8), activation='relu'))
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Dropout(0.1))
    model.add(Conv2D(32 * nn_scale, kernel_size=(2, 2), activation='relu'))
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Dropout(0.1))
    model.add(Conv2D(16 * nn_scale, kernel_size=(2, 2), activation='relu'))
    model.add(Conv2D(8 * nn_scale, kernel_size=(2, 2), activation='relu'))
    model.add(Flatten())
    model.add(Dense(8 * nn_scale))
    model.add(Dense(1))
    model.summary()
    model.compile(optimizer='adam', loss='huber_loss')
    return model
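# Note that `noise` doubles as an on/off flag and the standard deviation, so
# any nonzero float enables the layer:
model = get_model(noise=0.1, nn_scale=8, lookback=4)   # input noise, stddev 0.1
model_plain = get_model(noise=False)                   # no noise layer at all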
Example #17
    def controller_model(goal_shape, input_shape, action_space):

        # Goal input
        inputA = Input(goal_shape)
        x = Dense(512,
                  input_shape=goal_shape,
                  activation="relu",
                  kernel_initializer='he_uniform')(inputA)
        #x = Model(inputs = inputA, outputs = x)

        # History input
        inputB = Input(input_shape)
        y = TimeDistributed(GaussianNoise(0.1))(inputB)
        y = LSTM(512,
                 input_shape=input_shape,
                 activation="tanh",
                 kernel_initializer='he_uniform')(y)
        #y = Model(inputs = inputB, outputs = y)

        combined = concatenate([x, y])

        z = Dense(256, activation="relu",
                  kernel_initializer='he_uniform')(combined)
        z = Dense(64, activation="relu", kernel_initializer='he_uniform')(z)
        z = Dense(action_space,
                  activation="linear",
                  kernel_initializer='he_uniform')(z)
        model = Model(inputs=[inputA, inputB], outputs=z, name='Lane_Change')
        model.compile(loss="mse",
                      optimizer=RMSprop(lr=0.00025, rho=0.95, epsilon=0.01),
                      metrics=["accuracy"])
        model.summary()
        return model
Example #18
    def build_discriminator(self):
        """
        Input dimensions: (DAT_SHP)
        Output dimensions: (1) + (PAR_DIM)
        """

        ##### INPUT LAYERS
        X_in = Input(self.DAT_SHP)
        y_in = Input(self.PAR_DIM)

        ##### ADD NOISE TO IMAGE (stddev 0.00 disables it here)
        Xnet = GaussianNoise(0.00)(X_in)

        ynet = Dense(np.prod(self.DAT_SHP))(y_in)
        ynet = Reshape(self.DAT_SHP)(ynet)
        net = concatenate([Xnet, ynet], axis=-1)

        ##### CONV2D
        net = SpectralNormalization(
            Conv2D(self.DEPTH, (4, 2),
                   strides=(2, 1),
                   padding='same',
                   kernel_initializer=self.init))(net)
        net = LeakyReLU(alpha=0.2)(net)

        ##### CONV2D
        net = SpectralNormalization(
            Conv2D(self.DEPTH * 2, (4, 2),
                   strides=(2, 1),
                   padding='same',
                   kernel_initializer=self.init))(net)
        net = LeakyReLU(alpha=0.2)(net)

        ##### CONV2D
        net = SpectralNormalization(
            Conv2D(self.DEPTH * 4, (4, 2),
                   strides=(2, 1),
                   padding='same',
                   kernel_initializer=self.init))(net)
        net = LeakyReLU(alpha=0.2)(net)

        ##### TO DENSE
        net = Flatten()(net)

        ##### DENSE LAYER ($$ NO ACTIVATION!)
        net = Dense(64)(net)
        net = LeakyReLU(alpha=0.2)(net)

        ##### VALIDITY
        w_out = Dense(1, activation='sigmoid')(net)

        ##### BUILD AND COMPILE MODEL
        model = Model(inputs=[X_in, y_in], outputs=w_out)
        model.compile(loss=BinaryCrossentropy(label_smoothing=0.3),
                      metrics=['accuracy'],
                      optimizer=Adam(lr=self.LEARN_RATE, beta_1=0.5))

        ##### RETURN MODEL
        return model
Example #19
def add_drops(model, drop_out, k):
    # dg is presumably a module-level pattern string selecting the regularizer
    if dg[k].upper() == "D":
        model.add(Dropout(drop_out[0]))
    elif dg[k].upper() == "G":
        model.add(GaussianNoise(drop_out[k]))
    else:
        pass
    return model
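# A hedged sketch of how this helper might be driven; dg and the rates below
# are assumptions, not from the source:
dg = "DG"                 # "D" = Dropout, "G" = GaussianNoise, else nothing
drop_out = [0.3, 0.1]

model = Sequential()
model.add(Dense(64, activation='relu', input_shape=(20,)))
model = add_drops(model, drop_out, k=0)   # adds Dropout(drop_out[0])
model.add(Dense(64, activation='relu'))
model = add_drops(model, drop_out, k=1)   # adds GaussianNoise(drop_out[1])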
Example #20
    def build(self, hp):
        #GRUs
        inputs_gru = Input(shape=(timesteps, data_dim))
        noise_layer = GaussianNoise(
            hp.Choice('gaussian_noise',
                      [0.0, 0.25, 0.5, 0.75, 1.0]))(inputs_gru)
        start_gru_units = hp.Choice('units_first_layer_gru', [64, 128, 256])
        gru = Bidirectional(
            GRU(start_gru_units,
                return_sequences=True,
                batch_input_shape=(tot_samples, timesteps,
                                   data_dim)))(noise_layer)
        gru_spat_drop = 0.1
        #         gru_spat_drop = hp.Choice('gru_spat_dropout', [0.0, 0.01, 0.1, 0.25,0.5])
        old_gru_units = start_gru_units

        for i in range(3):
            spatialdrop = SpatialDropout1D(gru_spat_drop)(gru)
            cur_gru_units = hp.Choice(
                'units_gru_deep' + str(i),
                [max(32, old_gru_units // 2), old_gru_units])
            gru = Bidirectional(GRU(cur_gru_units,
                                    return_sequences=True))(spatialdrop)
            old_gru_units = cur_gru_units

        #Pool into dense
        global_maxpool = GlobalMaxPooling1D()(gru)
        dense_act = "tanh"
        dense_drop = hp.Choice('dense_dropout', [0.0, 0.1])
        start_dense_units = hp.Choice('units_first_dense', [256, 512, 1024])
        dense1 = Dense(start_dense_units, activation=dense_act)(global_maxpool)

        drop = Dropout(dense_drop)(dense1)
        old_dense_units = start_dense_units

        for i in range(4):
            cur_dense_units = hp.Choice(
                'units_dense_deep' + str(i),
                [max(32, old_dense_units // 2), old_dense_units])
            dense = Dense(cur_dense_units, activation=dense_act)(drop)
            drop = Dropout(dense_drop)(dense)
            old_dense_units = cur_dense_units

        output_layer = Dense(num_classes, activation="softmax")(drop)
        model = Model(inputs=inputs_gru,
                      outputs=output_layer,
                      name="SCSSK_gru_only")
        loss_choice = hp.Choice('loss_function',
                                ["focal", "categorical_crossentropy"])
        if loss_choice == "focal":
            model.compile(loss=focal_loss(),
                          optimizer="adam",
                          metrics=['accuracy'])
        elif loss_choice == "categorical_crossentropy":
            model.compile(loss="categorical_crossentropy",
                          optimizer="adam",
                          metrics=['accuracy'])
        return model
Example #21
def BBBSeg(base_filter: int = 32) -> Model:
    def conv3d(
        inputs: Tensor, 
        filters: int, 
        downsizing: bool = True, 
        loop: int = 2) -> Tensor:

        if downsizing:
            inputs = MaxPool3D(pool_size=(2, 2, 2))(inputs)
        x = inputs
        for _ in range(loop):
            x = Conv3D(filters, (3, 3, 3), strides=(1, 1, 1), use_bias=False, padding='same')(x)
            x = InstanceNormalization()(x)
            x = Activation('relu')(x)
        return x

    def upconv3d(
        inputs: Tensor, 
        skip_input: Tensor, 
        filters: int, 
        loop: int = 2) -> Tensor:

        def _crop_concat() -> Lambda:
            def crop(concat_layers: List[Tensor]) -> Tensor:
                big, small = concat_layers
                big_shape, small_shape = tf.shape(big), tf.shape(small)
                sh, sw, sd = small_shape[1], small_shape[2], small_shape[3]
                bh, bw, bd = big_shape[1], big_shape[2], big_shape[3]
                dh, dw, dd = bh - sh, bw - sw, bd - sd
                big_crop = big[:, :-dh, :-dw, :-dd, :]
                return K.concatenate([small, big_crop], axis=-1)
            return Lambda(crop)

        x = ZeroPadding3D(((0, 1), (0, 1), (0, 1)))(inputs)
        x = Conv3DTranspose(filters, (2, 2, 2), strides=(2, 2, 2), use_bias=False, padding='same')(x)
        x = InstanceNormalization()(x)
        x = Activation('relu')(x)

        x = _crop_concat()([x, skip_input])
        x = conv3d(x, filters, downsizing=False, loop=loop)
        return x

    img_input = Input(shape=(None, None, None, 1))
    d0 = GaussianNoise(0.1)(img_input)
    d1 = conv3d(d0, base_filter, downsizing=False, loop=1)
    d2 = conv3d(d1, base_filter*2, loop=2)
    d3 = conv3d(d2, base_filter*4, loop=2)
    d4 = conv3d(d3, base_filter*8, loop=2)
    d5 = conv3d(d4, base_filter*16, loop=2)

    u4 = upconv3d(d5, d4, base_filter*8)
    u3 = upconv3d(u4, d3, base_filter*4)
    u2 = upconv3d(u3, d2, base_filter*2)
    u1 = upconv3d(u2, d1, base_filter, loop=1)
    img_output = Conv3D(2, (1, 1, 1), strides=(1, 1, 1), padding='same', activation='softmax')(u1)

    model = Model(img_input, img_output, name='unet')
    return model
Example #22
    def network(self):
        """ Actor Network for Policy function Approximation, using a tanh
        activation for continuous control. We add parameter noise to encourage
        exploration, and balance it with Layer Normalization.
        """
        inp = Input((self.env_dim))
        #
        x = Dense(256, activation='relu')(inp)
        x = GaussianNoise(1.0)(x)
        #
        x = Flatten()(x)
        x = Dense(128, activation='relu')(x)
        x = GaussianNoise(1.0)(x)
        #
        out = Dense(self.act_dim, activation='tanh', kernel_initializer=RandomUniform())(x)
        out = Lambda(lambda i: i * self.act_range)(out)
        #
        return Model(inp, out)
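# Because GaussianNoise is active only under the training flag, exploration
# noise can be toggled per call. A minimal sketch; the state shape and the
# calling context are assumptions:
import numpy as np

actor = self.network()
state = np.random.randn(1, 8).astype('float32')
noisy_action = actor(state, training=True)    # noise layers active
greedy_action = actor(state, training=False)  # deterministic policy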
Example #23
def lstm_model():
    input_vector = Input(shape=(MAXLEN, DOC_VEC_SIZE))
    layers = Bidirectional(LSTM(1024, return_sequences=True))(input_vector)
    layers = GaussianNoise(0.15)(layers)
    layers = Bidirectional(LSTM(512))(layers)
    layers = Dropout(0.3)(layers)
    layers = Dense(DOC_VEC_SIZE)(layers)

    return Model(inputs=[input_vector], outputs=[layers])
Example #24
    def __init__(self,
                 data_size=(28 * 28),
                 latent_size=10,
                 encoding_sequence=[],
                 decoding_sequence=None,
                 activation="relu",
                 tensorboard_logging=True,
                 optimizer=None):
        self.latent_size = latent_size
        self.model = Sequential()
        self.encoder = Sequential()
        self.decoder = Sequential()

        # Encoding sequence
        self.model.add(GaussianNoise(0.1, input_shape=(data_size, )))
        for size in encoding_sequence:
            self.model.add(Dense(size, activation=activation))
            self.encoder.add(self.model.layers[-1])

        # Reach the latent space
        self.model.add(Dense(latent_size))
        self.encoder.add(self.model.layers[-1])
        self.model.add(BatchNormalization())
        self.encoder.add(self.model.layers[-1])
        self.model.add(Activation('sigmoid'))
        self.decoder.add(self.model.layers[-1])

        # Decoding sequence
        if decoding_sequence is None:
            _decoding_sequence = reversed(encoding_sequence)
        else:
            _decoding_sequence = decoding_sequence
        for size in _decoding_sequence:
            self.model.add(Dense(size, activation=activation))
            self.decoder.add(self.model.layers[-1])
        # Final linear decoder
        self.model.add(Dense(data_size, activation="sigmoid"))
        self.decoder.add(self.model.layers[-1])

        # Logging
        self.logging = False
        if tensorboard_logging:
            self.logging = True
            logdir = "logs/scalars/" + datetime.now().strftime("%Y%m%d-%H%M%S")
            self.tensorboard_callback = TensorBoard(log_dir=logdir)

        # Set the optimizer
        if optimizer is None:
            _optimizer = "adam"
        else:
            _optimizer = optimizer

        # Compile the model
        self.model.compile(loss='mse', optimizer=_optimizer)

        # Compile encoder and decoders, loss and optimizers are irrelevant as these models are not trained.
        self.encoder.compile(loss='mse', optimizer="adam")
        self.decoder.compile(loss='mse', optimizer="adam")
Example #25
def Generator(latent_dim, n_classes=2):
    in_label = Input(shape=(1, ))
    # embedding for categorical input
    li = Embedding(n_classes, 50)(in_label)
    # linear multiplication
    n_nodes = 8 * 8
    li = Dense(n_nodes)(li)
    # reshape to additional channel
    li = Reshape((8, 8, 1))(li)

    n_nodes = 128 * 8 * 8
    # image generator input
    in_lat = Input(shape=(latent_dim, ))
    x = Dense(n_nodes)(in_lat)
    x = LeakyReLU(alpha=0.2)(x)
    x = Reshape((8, 8, 128))(x)

    # merge image gen and label input
    merge = Concatenate()([x, li])

    # upsample to 16x16
    x = Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same')(merge)
    x = LeakyReLU(alpha=0.2)(x)
    x = GaussianNoise(0.5)(x)
    # upsample to 32x32
    x = Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same')(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = GaussianNoise(0.5)(x)
    # upsample to 64x64
    x = Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same')(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = GaussianNoise(0.5)(x)
    # upsample to 128x128
    x = Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same')(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = GaussianNoise(0.5)(x)
    # output layer 128x128x3
    out = Conv2D(3, (5, 5), activation='tanh', padding='same')(x)

    # define model
    model = Model([in_lat, in_label], out)
    return model
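# A quick usage sketch for the conditional generator above; latent_dim and the
# batch size are arbitrary:
import numpy as np

latent_dim = 100
gen = Generator(latent_dim, n_classes=2)
z = np.random.normal(size=(16, latent_dim)).astype('float32')
labels = np.random.randint(0, 2, size=(16, 1))
fake = gen.predict([z, labels])
print(fake.shape)  # (16, 128, 128, 3), values in tanh range [-1, 1]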
Example #26
    def build_encoder(self):
        """
        Input dimensions: (DAT_SHP)
        Output dimensions: (1) + (PAR_DIM)
        """

        ##### INPUT LAYERS
        X_in = Input(self.DAT_SHP)

        ##### ADD NOISE TO IMAGE (stddev 0.00 disables it here)
        net = GaussianNoise(0.00)(X_in)

        ##### CONV2D
        net = SpectralNormalization(
            Conv2D(self.DEPTH, (4, 2),
                   strides=(2, 1),
                   padding='same',
                   kernel_initializer=self.init))(net)
        net = LeakyReLU(alpha=0.2)(net)

        ##### CONV2D
        net = SpectralNormalization(
            Conv2D(self.DEPTH * 2, (4, 2),
                   strides=(2, 1),
                   padding='same',
                   kernel_initializer=self.init))(net)
        net = LeakyReLU(alpha=0.2)(net)

        ##### CONV2D
        net = SpectralNormalization(
            Conv2D(self.DEPTH * 4, (4, 2),
                   strides=(2, 1),
                   padding='same',
                   kernel_initializer=self.init))(net)
        net = LeakyReLU(alpha=0.2)(net)

        ##### TO DENSE
        net = Flatten()(net)

        ##### DENSE LAYER ($$ NO ACTIVATION!)
        net = Dense(64)(net)
        net = LeakyReLU(alpha=0.2)(net)

        ##### PARAMETERS
        y_out = Dense(self.PAR_DIM, activation='linear')(net)

        ##### NOISE
        z_out = Dense(self.LAT_DIM, activation='linear')(net)

        ##### BUILD AND COMPILE MODEL
        model = Model(inputs=[X_in], outputs=[y_out, z_out])

        ##### RETURN MODEL
        return model
Example #27
def KidneyTumorSeg(input_shape: Tuple[Optional[int], Optional[int],
                                      Optional[int], int],
                   num_labels: int,
                   base_filter: int = 32,
                   depth: int = 4,
                   se_ratio: int = 16,
                   noise: float = 0.1,
                   last_relu: bool = False) -> Model:

    img_input = Input(shape=input_shape)

    d0 = GaussianNoise(noise)(img_input)
    d1 = conv3d(d0,
                base_filter,
                is_se_block=False,
                se_ratio=se_ratio,
                downsizing=False,
                loop=1)
    d2 = conv3d(d1, base_filter * 2, se_ratio=se_ratio)
    d3 = conv3d(d2, base_filter * 4, se_ratio=se_ratio)
    d4 = conv3d(d3, base_filter * 8, se_ratio=se_ratio)

    if depth == 4:
        d5 = conv3d(d4, base_filter * 16, se_ratio=se_ratio)

        u4 = upconv3d(d5, d4, base_filter * 8, se_ratio=se_ratio)
        u3 = upconv3d(u4, d3, base_filter * 4, se_ratio=se_ratio)
    elif depth == 3:
        u3 = upconv3d(d4, d3, base_filter * 4, se_ratio=se_ratio)
    else:
        raise ValueError(f'Depth must be 3 or 4, got {depth}')

    u2 = upconv3d(u3, d2, base_filter * 2, se_ratio=se_ratio)
    u1 = upconv3d(u2,
                  d1,
                  base_filter,
                  is_se_block=False,
                  se_ratio=se_ratio,
                  loop=1)

    if num_labels == 1:
        img_output = Conv3D(num_labels, (1, 1, 1),
                            strides=(1, 1, 1),
                            padding='same',
                            activation='sigmoid')(u1)
    else:
        img_output = Conv3D(num_labels, (1, 1, 1),
                            strides=(1, 1, 1),
                            padding='same',
                            activation='softmax')(u1)

    model = Model(img_input, img_output)

    return model
Example #28
    def generate_model(self):
        """
        Model for MLP multiple regression (s2s)

        :return:
        """

        activation = self.config['arch']['activation']
        dropout = self.config['arch']['drop']
        full_layers = self.config['arch']['full']

        # optionally use a GaussianNoise layer for regularization
        noise = self.config['arch'].get('noise', 0)
        bnorm = self.config['arch'].get('batchnorm', False)

        # Extra added from training function
        idimensions = self.config['idimensions']
        odimension = self.config['odimensions']

        data_input = Input(shape=idimensions)

        if noise != 0:
            layer = GaussianNoise(noise)(data_input)
            layer = Dense(full_layers[0])(layer)

            layer = generate_activation(activation)(layer)
            layer = Dropout(rate=dropout)(layer)
        else:
            layer = Dense(full_layers[0])(data_input)
            if bnorm:
                layer = BatchNormalization()(layer)
            layer = generate_activation(activation)(layer)
            layer = Dropout(rate=dropout)(layer)

        for units in full_layers[1:]:
            layer = Concatenate()([data_input, layer])
            layer = Dense(units=units)(layer)
            if bnorm:
                layer = BatchNormalization()(layer)
            layer = generate_activation(activation)(layer)

            layer = Dropout(rate=dropout)(layer)

        output = Dense(odimension, activation='linear')(layer)

        self.model = Model(inputs=data_input, outputs=output)
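# A minimal example of the configuration dict that generate_model reads,
# inferred from the keys above; all values are illustrative only:
self.config = {
    'arch': {
        'activation': 'relu',
        'drop': 0.2,
        'full': [128, 64, 32],   # units of the dense layers
        'noise': 0.05,           # optional GaussianNoise stddev
        'batchnorm': False,      # optional batch normalization flag
    },
    'idimensions': (24,),        # input shape
    'odimensions': 12,           # number of outputs
}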
Example #29
    def build_model_storm(hp):
        n_layers = hp.Param('n_layers', [1, 2, 3, 4, 5], ordered=True)
        weight_decay = hp.Param('weight_decay', [0, 1e-5, 1e-4, 1e-3],
                                ordered=True)
        num_hidden = hp.Param('num_hidden', [100, 200, 300, 400, 500],
                              ordered=True)
        model = tf.keras.Sequential()
        model.add(tf.keras.layers.Flatten())
        for i in range(n_layers):
            model.add(
                tf.keras.layers.Dense(
                    num_hidden,
                    activation=hp.Param('activation',
                                        ['relu', 'tanh', 'elu', 'selu']),
                    kernel_regularizer=tf.keras.regularizers.l2(weight_decay),
                ))

            if hp.Param('batch_norm', [True, False]):
                model.add(BatchNormalization())

            if hp.Param('add_noise', [True, False]):
                model.add(
                    GaussianNoise(
                        hp.Param('noise_std', [1e-5, 1e-4, 1e-3, 1e-2, 1e-1],
                                 ordered=True)))

            if hp.Param('add_dropout', [True, False]):
                model.add(
                    Dropout(
                        hp.Param('dropout_rate', [0.1, 0.2, 0.3, 0.4, 0.5],
                                 ordered=True)))

        model.add(
            tf.keras.layers.Dense(
                CLASSES,
                kernel_regularizer=tf.keras.regularizers.l2(weight_decay)))

        opt = hp.Param('optimizer', ['adam', 'sgd'])
        lr = hp.Param('learning_rate', [1e-2, 1e-3, 1e-4], ordered=True)
        if opt == 'adam':
            optimizer = Adam(lr=lr,
                             epsilon=hp.Param('epsilon',
                                              [1e-12, 1e-10, 1e-8, 1e-6],
                                              ordered=True))
        else:
            optimizer = SGD(lr=lr,
                            momentum=hp.Param('momentum', [0.85, 0.9, 0.95],
                                              ordered=True))

        loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
        model.compile(loss=loss_fn, optimizer=optimizer, metrics=['accuracy'])

        return model
Example #30
    def build_discriminator(self):
        """
        Input:
            * image of IMG_SHP
        Output:
            * binary real/fake classification
            * vector of CLS_SHP with one-hot-encoded class
        """

        ##### INPUT IMAGE
        X_in = Input(self.IMG_SHP)

        ##### ADD NOISE TO IMAGE
        net = GaussianNoise(0.05)(X_in)

        ##### CONV2D LAYER WITH STRIDE 2
        net = Conv2D(self.depth, (4, 4),
                     strides=(2, 2),
                     padding='same',
                     kernel_initializer=self.init)(net)
        #net = BatchNormalization()(net)
        net = LeakyReLU()(net)
        net = Dropout(0.4)(net)

        ##### CONV2D LAYER WITH STRIDE 2
        net = Conv2D(self.depth, (4, 4),
                     strides=(2, 2),
                     padding='same',
                     kernel_initializer=self.init)(net)
        #net = BatchNormalization()(net)
        net = LeakyReLU()(net)
        net = Dropout(0.4)(net)

        ##### TO DENSE
        net = Flatten()(net)

        ##### DENSE LAYER
        net = Dense(self.depth)(net)
        #net = BatchNormalization()(net)
        net = LeakyReLU()(net)
        net = Dropout(0.4)(net)

        ##### OUTPUT1: BINARY CLASSIFICATION
        w_out = Dense(1, activation='sigmoid')(net)

        ##### OUTPUT2: ONE-HOT-VECTOR OF CLASS
        y_out = Dense(self.CLS_SHP, activation='softmax')(net)

        ##### BUILD, COMPILE AND RETURN MODEL
        model = Model(inputs=X_in, outputs=[w_out, y_out])
        model.compile(loss=['binary_crossentropy', 'categorical_crossentropy'],
                      optimizer=Adam(lr=0.0002, beta_1=0.5))
        return model