Example #1
def Decoder(hidden_sizes,
            use_BN=False,
            use_LN=False,
            use_WN=False,
            hidden_activation='relu'):
    inputs = tf.keras.Input(shape=(16, ))
    if use_WN:
        for i in range(len(hidden_sizes)):
            if i == 0:
                dec = WeightNormalization(
                    Dense(hidden_sizes[i],
                          activation=hidden_activation))(inputs)
            else:
                dec = WeightNormalization(
                    Dense(hidden_sizes[i], activation=hidden_activation))(dec)
            if use_BN:
                dec = BatchNormalization()(dec)
            if use_LN:
                dec = LayerNormalization()(dec)
    else:
        for i in range(len(hidden_sizes)):
            if i == 0:
                dec = Dense(hidden_sizes[i],
                            activation=hidden_activation)(inputs)
            else:
                dec = Dense(hidden_sizes[i], activation=hidden_activation)(dec)
            if use_BN:
                dec = BatchNormalization()(dec)
            if use_LN:
                dec = LayerNormalization()(dec)
    dec = Dense(8, activation='sigmoid')(dec)
    model = tf.keras.Model(inputs, dec)
    return model
Example #2
def build_model():

    model = Sequential([
        Input(batch_input_shape=(None, 1000, 12)),
        BatchNormalization(),
        WeightNormalization(
            Dense(512, activation='relu', kernel_initializer='he_normal')),
        Dropout(.6),
        BatchNormalization(),
        WeightNormalization(
            Dense(512, activation='relu', kernel_initializer='he_normal')),
        Dropout(.4),
        BatchNormalization(),
        WeightNormalization(
            Dense(256, activation='relu', kernel_initializer='he_normal')),
        Dropout(.2),
        BatchNormalization(),
        WeightNormalization(
            Dense(128, activation='relu', kernel_initializer='he_normal')),
        Dropout(.2),
        Flatten(),
        BatchNormalization(),
        Dense(5, activation='sigmoid', kernel_initializer='glorot_normal')
    ])

    model.compile(optimizer='adam',
                  loss=tf.keras.losses.BinaryCrossentropy(),
                  metrics=['accuracy'])

    return model
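A quick smoke test for the builder above; the random arrays, batch size, and epoch count here are placeholders, not part of the source:

import numpy as np

model = build_model()
x = np.random.rand(32, 1000, 12).astype("float32")            # matches batch_input_shape=(None, 1000, 12)
y = np.random.randint(0, 2, size=(32, 5)).astype("float32")   # five sigmoid outputs
model.fit(x, y, batch_size=8, epochs=1)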
Example #3
def build_model():

    model = Sequential([
        BatchNormalization(),
        WeightNormalization(
            Dense(
                1028,
                activation='relu',
                kernel_initializer='he_uniform',
            )),
        Dropout(.2),
        BatchNormalization(),
        WeightNormalization(
            Dense(
                512,
                activation='relu',
                kernel_initializer='he_uniform',
            )),
        Dropout(.2),
        BatchNormalization(),
        WeightNormalization(
            Dense(206,
                  activation='sigmoid',
                  kernel_initializer='glorot_uniform')),
        Dropout(.2),
    ])

    model.compile(
        optimizer='adam',
        loss='binary_crossentropy',
    )

    return model
Example #4
def residual(inputs, n_filters, expansion, kernel_size, scaling):
    linear = 0.8
    x = WeightNormalization(
        Conv2D(n_filters * expansion, (1, 1),
               padding='same',
               activation='relu'))(inputs)
    x = WeightNormalization(
        Conv2D(int(n_filters * linear), (1, 1), padding='same'))(x)
    x = WeightNormalization(Conv2D(n_filters, kernel_size,
                                   padding='same'))(x)
    if scaling:
        x = Lambda(lambda t: t * scaling)(x)
    x = Add()([inputs, x])
    return x
Example #5
def model1():
    input_u = Input(shape=(u_fts_num,))
    layer_u = layer_BDWD(1024, activation=activation, kn_init=initializer,
                         kn_reg=kn_reg, bias_init=bias_init_carefully)(input_u)
    layer_u = layer_BDWD(1024, activation=activation, kn_init=initializer,
                         kn_reg=kn_reg, bias_init=bias_init_carefully)(layer_u)
    layer_u = layer_BDWD(512, activation=activation, kn_init=initializer,
                         kn_reg=kn_reg, bias_init=bias_init_carefully)(layer_u)
    out_put = WeightNormalization(Dense(i_fts_num, activation='sigmoid'))(layer_u)
    return Model(inputs=[input_u], outputs=[out_put])
Example #6
    def __init__(self, num_output_time_series, window_size, kernel_size,
                 output_size, kernel_initializer, activation):
        super(DownsampleLayerWithAttention, self).__init__()
        self.num_output_time_series = num_output_time_series
        self.output_size = output_size
        self.kernel_initializer = kernel_initializer

        self.down_tcn = tf.keras.layers.Conv1D(
            filters=num_output_time_series,
            kernel_size=kernel_size,
            padding="causal",
            kernel_initializer=kernel_initializer,
            name=F"downsample_tcn")
        self.weight_norm_down_tcn = WeightNormalization(
            self.down_tcn, data_init=False, name=F"wn_downsample_tcn")

        self.query_dense_layer = tf.keras.layers.Dense(output_size)

        self.key_dense_layer = SepDenseLayer(num_output_time_series,
                                             window_size,
                                             window_size,
                                             kernel_initializer,
                                             activation=activation,
                                             use_bias=True,
                                             name="key")

        self.value_dense_layer = SepDenseLayer(num_output_time_series,
                                               window_size,
                                               window_size,
                                               kernel_initializer,
                                               activation=activation,
                                               use_bias=False,
                                               name="value")
        self.post_attention_layer = DotAttentionLayer(window_size)
Example #7
def create_keras_model(inputShape,
                       nClasses,
                       scale=2,
                       n_filters=32,
                       depth=8,
                       residual_expansion=6,
                       residual_scaling=None):
    def residual(inputs, n_filters, expansion, kernel_size, scaling):
        linear = 0.8
        x = WeightNormalization(
            Conv2D(n_filters * expansion, (1, 1),
                   padding='same',
                   activation='relu'))(inputs)
        x = WeightNormalization(
            Conv2D(int(n_filters * linear), (1, 1), padding='same'))(x)
        x = WeightNormalization(Conv2D(n_filters, kernel_size,
                                       padding='same'))(x)
        if scaling:
            x = Lambda(lambda t: t * scaling)(x)
        x = Add()([inputs, x])
        return x

    inputs = Input(shape=inputShape)

    # main branch
    xm = WeightNormalization(Conv2D(n_filters, (3, 3), padding='same'))(inputs)
    for i in range(depth):
        xm = residual(xm,
                      n_filters,
                      residual_expansion,
                      kernel_size=3,
                      scaling=residual_scaling)
    xm = WeightNormalization(
        Conv2D(nClasses * scale**2, (3, 3), padding='same'))(xm)
    xm = Lambda(lambda x: tf.nn.depth_to_space(x, scale))(xm)

    # skip branch
    xs = WeightNormalization(
        Conv2D(nClasses * scale**2, (5, 5), padding='same'))(inputs)
    xs = Lambda(lambda x: tf.nn.depth_to_space(x, scale))(xs)

    outputs = Add()([xm, xs])

    model = Model(inputs, outputs, name="wdsr")

    return model
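A usage sketch for create_keras_model; the input shape, class count, and depth below are illustrative choices, not values from the source:

# 32x32 RGB patches, 3 output channels, 2x upsampling via tf.nn.depth_to_space.
model = create_keras_model(inputShape=(32, 32, 3), nClasses=3, scale=2, depth=4)
model.summary()  # final output is 64x64 with nClasses channels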
Example #8
def layer_cpl(input_layer):
    '''BN - DROPOUT - WEIGHTNORMAL - DENSE'''
    layer = BatchNormalization()(input_layer)
    layer = Dropout(0.25)(layer)
    dense = Dense(n_components, activation=activation,
                  kernel_initializer=initializer, kernel_regularizer=kn_reg,
                  bias_initializer=bias_init)
    layer = WeightNormalization(dense)(layer)
    return layer
Example #9
def Encoder(hidden_sizes,
            use_BN=False,
            use_LN=False,
            use_WN=False,
            encoder_activation='tanh',
            hidden_activation='relu',
            enc_kernel_reg=None,
            enc_activity_reg=None,
            additional_layers=[]):
    inputs = tf.keras.Input(shape=(8, ))
    if use_WN:
        for i in range(len(hidden_sizes)):
            if i == 0:
                enc = WeightNormalization(
                    Dense(hidden_sizes[i],
                          activation=hidden_activation))(inputs)
            else:
                enc = WeightNormalization(
                    Dense(hidden_sizes[i], activation=hidden_activation))(enc)
            if use_BN:
                enc = BatchNormalization()(enc)
            if use_LN:
                enc = LayerNormalization()(enc)
    else:
        for i in range(len(hidden_sizes)):
            if i == 0:
                enc = Dense(hidden_sizes[i],
                            activation=hidden_activation)(inputs)
            else:
                enc = Dense(hidden_sizes[i], activation=hidden_activation)(enc)
            if use_BN:
                enc = BatchNormalization()(enc)
            if use_LN:
                enc = LayerNormalization()(enc)

    enc = Dense(16,
                activation=encoder_activation,
                kernel_regularizer=enc_kernel_reg,
                activity_regularizer=enc_activity_reg)(enc)
    for add_layer in additional_layers:
        enc = add_layer()(enc)
    model = tf.keras.Model(inputs, enc)
    return model
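Chained with the Decoder from Example #1, this Encoder yields an 8 -> 16 -> 8 autoencoder. A minimal sketch with illustrative hidden sizes and an assumed MSE reconstruction loss:

encoder = Encoder(hidden_sizes=[32, 32], use_WN=True)  # 8 inputs -> 16-dim code
decoder = Decoder(hidden_sizes=[32, 32], use_WN=True)  # 16-dim code -> 8 outputs
ae_in = tf.keras.Input(shape=(8, ))
ae_out = decoder(encoder(ae_in))
autoencoder = tf.keras.Model(ae_in, ae_out)
autoencoder.compile(optimizer='adam', loss='mse')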
Example #10
def final_pipe():

    model = Sequential([
        BatchNormalization(),
        WeightNormalization(
            Dense(
                1028,
                activation='relu',
                kernel_initializer='he_uniform',
            )),
        Dropout(.2),
        BatchNormalization(),
        WeightNormalization(
            Dense(
                512,
                activation='relu',
                kernel_initializer='he_uniform',
            )),
        Dropout(.2),
        BatchNormalization(),
        WeightNormalization(
            Dense(206,
                  activation='sigmoid',
                  kernel_initializer='glorot_uniform')),
        Dropout(.2),
        BatchNormalization(),
        WeightNormalization(
            Dense(1, activation='sigmoid',
                  kernel_initializer='glorot_uniform')),
        Dropout(.2),
    ])

    model.compile(
        optimizer='adam',
        loss='mean_squared_logarithmic_error',
    )

    return model
Example #11
def DNN_model(input_size, output_size):
    """inputs = Input((input_size, ))

    outputs = BatchNormalization()(inputs)
    outputs = Dropout(0.20)(outputs)
    outputs = Dense(1024, activation="relu")(outputs)
    outputs = WeightNormalization()(outputs)


    outputs = BatchNormalization()(outputs)
    outputs = Dropout(0.20)(outputs)
    outputs = Dense(1024, activation="relu")(outputs)
    outputs = WeightNormalization()(outputs)

    outputs = BatchNormalization()(outputs)
    outputs = Dense(output_size, activation="sigmoid")(outputs)
    outputs = WeightNormalization()(outputs)"""

    model = Sequential([Input((input_size, ))])

    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(WeightNormalization(Dense(1024, activation="relu")))

    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(WeightNormalization(Dense(1024, activation="relu")))

    model.add(BatchNormalization())
    model.add(WeightNormalization(Dense(output_size, activation="sigmoid")))

    #model = Model(inputs=[inputs], outputs=[outputs])
    model.compile(optimizer=AdamW(lr=1e-3, weight_decay=1e-5, clipvalue=756),
                  loss="binary_crossentropy")

    return model
Example #12
	def __init__(self, k, d, normalized=True):
		super(Residual, self).__init__()
		if normalized:
			self.causal1D = WeightNormalization(Conv1D(kernel_size=k,
													   padding="causal",
													   dilation_rate=2 ** (d - 1),
													   activation="relu",
													   filters=64,
													   kernel_initializer='random_normal'))
		else:
			self.causal1D = Conv1D(kernel_size=k,
								   padding="causal",
								   dilation_rate=2 ** (d - 1),
								   activation="relu",
								   filters=64,
								   kernel_initializer='random_normal')
		self.drop = SpatialDropout1D(0.1)
		self.matching_1D = Conv1D(kernel_size=1,
								  padding="same",
								  activation="relu",
								  filters=64,
								  kernel_initializer='random_normal')
Example #13
    def __init__(self, base_filters=32, lrelu_alpha=0.2, pad_type="reflect"):
        super(Discriminator, self).__init__(name="Discriminator")
        if pad_type == "reflect":
            self.flat_pad = ReflectionPadding2D()
        elif pad_type == "constant":
            self.flat_pad = ZeroPadding2D()
        else:
            raise ValueError(f"pad_type not recognized {pad_type}")

        self.flat_conv = Conv2D(base_filters, 3)
        self.flat_lru = LeakyReLU(lrelu_alpha)
        self.strided_conv1 = StridedConv(base_filters * 2,
                                         lrelu_alpha,
                                         pad_type,
                                         gp_num=32)
        self.strided_conv2 = StridedConv(base_filters * 4,
                                         lrelu_alpha,
                                         pad_type,
                                         gp_num=64)
        self.gp_norm = GroupNormalization(groups=64, axis=-1)
        self.conv2 = WeightNormalization(Conv2D(base_filters * 8, 3))
        self.lrelu = LeakyReLU(lrelu_alpha)

        self.final_conv = Conv2D(1, 3)
Example #14
    def build(self, input_shape):

        with K.name_scope(
                self.name
        ):  # name scope used to make sure weights get unique names
            self.layers = []
            self.res_output_shape = input_shape

            for k in range(2):
                name = 'conv1D_{}'.format(k)
                with K.name_scope(
                        name
                ):  # name scope used to make sure weights get unique names
                    conv = Conv1D(filters=self.nb_filters,
                                  kernel_size=self.kernel_size,
                                  dilation_rate=self.dilation_rate,
                                  padding=self.padding,
                                  name=name,
                                  kernel_initializer=self.kernel_initializer)
                    if self.use_weight_norm:
                        from tensorflow_addons.layers import WeightNormalization
                        # Wrap it: the WeightNormalization API differs from BatchNormalization and LayerNormalization.
                        with K.name_scope('norm_{}'.format(k)):
                            conv = WeightNormalization(conv)
                    self._build_layer(conv)

                with K.name_scope('norm_{}'.format(k)):
                    if self.use_batch_norm:
                        self._build_layer(BatchNormalization())
                    elif self.use_layer_norm:
                        self._build_layer(LayerNormalization())
                    elif self.use_weight_norm:
                        pass  # done above.

                self._build_layer(Activation(self.activation))
                self._build_layer(SpatialDropout1D(rate=self.dropout_rate))

            if self.nb_filters != input_shape[-1]:
                # 1x1 conv to match the shapes (channel dimension).
                name = 'matching_conv1D'
                with K.name_scope(name):
                    # make and build this layer separately because it directly uses input_shape
                    self.shape_match_conv = Conv1D(
                        filters=self.nb_filters,
                        kernel_size=1,
                        padding='same',
                        name=name,
                        kernel_initializer=self.kernel_initializer)
            else:
                name = 'matching_identity'
                self.shape_match_conv = Lambda(lambda x: x, name=name)

            with K.name_scope(name):
                self.shape_match_conv.build(input_shape)
                self.res_output_shape = self.shape_match_conv.compute_output_shape(
                    input_shape)

            self._build_layer(Activation(self.activation))
            self.final_activation = Activation(self.activation)
            self.final_activation.build(
                self.res_output_shape)  # probably isn't necessary

            # this is done to force Keras to add the layers in the list to self._layers
            for layer in self.layers:
                self.__setattr__(layer.name, layer)
            self.__setattr__(self.shape_match_conv.name, self.shape_match_conv)
            self.__setattr__(self.final_activation.name, self.final_activation)

            super(ResidualBlock, self).build(
                input_shape)  # done to make sure self.built is set True
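The "wrap it" comment above flags an API asymmetry worth spelling out: weight normalization wraps the layer it normalizes, while batch and layer normalization are applied as separate layers to the output. A minimal sketch, assuming tensorflow_addons is installed:

import tensorflow as tf
from tensorflow.keras.layers import BatchNormalization, Conv1D
from tensorflow_addons.layers import WeightNormalization

inputs = tf.keras.Input(shape=(100, 8))
x = WeightNormalization(Conv1D(filters=16, kernel_size=3, padding='causal'))(inputs)  # wraps the conv itself
x = BatchNormalization()(x)  # applied to the output as its own layer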
Example #15
# Hyper params
NFOLDS = 7
BATCH_SIZE = 128
EPOCHS = 150
BAGGING_ALPHA = 0.75
SEEDS = [23, 228, 1488, 1998, 2208, 2077, 404]
KFOLDS = 10
label_smoothing_alpha = 0.0005
P_MIN = label_smoothing_alpha
P_MAX = 1 - P_MIN

# Define model
model = Sequential([
    BatchNormalization(),
    WeightNormalization(Dense(1024, activation="relu")),
    BatchNormalization(),
    Dropout(0.25),
    WeightNormalization(Dense(512, activation="relu")),
    BatchNormalization(),
    Dropout(0.25),
    WeightNormalization(Dense(256, activation="relu")),
    BatchNormalization(),
    Dropout(0.25),
    WeightNormalization(Dense(num_labels, activation="sigmoid"))
])

def logloss(y_true, y_pred):
    y_pred = tf.clip_by_value(y_pred,P_MIN,P_MAX)
    return -K.mean(y_true*K.log(y_pred) + (1-y_true)*K.log(1-y_pred))
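The snippet stops before the model and loss are tied together; a hedged sketch of the compile step, assuming the custom loss is used directly:

# Assumption: logloss is passed to compile so predictions are clipped to
# [P_MIN, P_MAX] before the log terms are evaluated.
model.compile(optimizer='adam', loss=logloss)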
Example #16
    def __init__(self, block_num, filter_num, kernel_size, dilation_rate,
                 window_size, use_bias, kernel_initializer, dropout_rate,
                 dropout_format, activation, final_activation):
        super(BasicTCNBlock, self).__init__()

        self.dropout_rate = dropout_rate
        valid_dropout_formats = {"channel", "timestep", "all"}
        if dropout_format not in valid_dropout_formats:
            raise ValueError("Dropout format must be one of %r." %
                             valid_dropout_formats)
        if dropout_format == "channel":
            self.noise_shape = [1, filter_num]
        elif dropout_format == "timestep":
            self.noise_shape = [window_size, 1]
        else:
            self.noise_shape = [window_size, filter_num]

        self.tcn_1 = tf.keras.layers.Conv1D(
            filters=filter_num,
            kernel_size=kernel_size,
            padding="causal",
            dilation_rate=dilation_rate,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            name=F"{block_num}_tcn_1")
        self.weight_norm_layer_1 = WeightNormalization(
            self.tcn_1, data_init=False, name=F"{block_num}_wn_1")

        self.tcn_2 = tf.keras.layers.Conv1D(
            filters=filter_num,
            kernel_size=kernel_size,
            padding="causal",
            dilation_rate=dilation_rate,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            name=F"{block_num}_tcn_2")
        self.weight_norm_layer_2 = WeightNormalization(
            self.tcn_2, data_init=False, name=F"{block_num}_wn_2")

        self.tcn_3 = tf.keras.layers.Conv1D(
            filters=filter_num,
            kernel_size=1,
            padding="causal",
            dilation_rate=dilation_rate,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            name=F"{block_num}_tcn_3")
        self.weight_norm_layer_3 = WeightNormalization(
            self.tcn_3, data_init=False, name=F"{block_num}_wn_3")

        self.dropout_layer_1 = tf.keras.layers.Dropout(
            rate=self.dropout_rate,
            noise_shape=self.noise_shape,
            name=F"{block_num}_dropout_1")

        self.dropout_layer_2 = tf.keras.layers.Dropout(
            rate=self.dropout_rate,
            noise_shape=self.noise_shape,
            name=F"{block_num}_dropout_2")

        self.activation = tf.keras.layers.Activation(activation)

        self.final_activation = tf.keras.layers.Activation(final_activation)