def __init__(self, d_model, num_heads, dff, rate=0.1):
    super(EncoderLayer, self).__init__()
    self.mha = MultiHeadAttention(d_model, num_heads)
    self.ffn = PointWiseFeedForwardNetwork(d_model, dff)
    self.layernorm1 = LayerNormalization(epsilon=1e-6)
    self.layernorm2 = LayerNormalization(epsilon=1e-6)
    self.dropout1 = Dropout(rate)
    self.dropout2 = Dropout(rate)
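# A minimal sketch of the matching call method, assuming the standard post-norm
# Transformer wiring (attention and FFN each followed by dropout, a residual
# add, and LayerNormalization). The (x, x, x, mask) signature of the custom
# MultiHeadAttention is an assumption, not taken from the snippet above.
def call(self, x, training, mask):
    attn_output, _ = self.mha(x, x, x, mask)       # self-attention over the sequence
    attn_output = self.dropout1(attn_output, training=training)
    out1 = self.layernorm1(x + attn_output)        # residual connection + LayerNorm
    ffn_output = self.ffn(out1)                    # position-wise feed-forward network
    ffn_output = self.dropout2(ffn_output, training=training)
    return self.layernorm2(out1 + ffn_output)      # second residual + LayerNorm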
def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1):
    super(TransformerBlock, self).__init__()
    self.att = MultiHeadSelfAttention(embed_dim, num_heads)
    self.ffn = tf.keras.Sequential([
        Dense(ff_dim, activation="relu"),
        Dense(embed_dim),
    ])
    self.layernorm1 = LayerNormalization(epsilon=1e-6)
    self.layernorm2 = LayerNormalization(epsilon=1e-6)
    self.dropout1 = Dropout(rate)
    self.dropout2 = Dropout(rate)
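# A hypothetical usage sketch: embed a token sequence, run it through the block,
# and pool for classification. Assumes the block also defines a forward pass
# (post-norm residual wiring, as sketched above) and that Input, Embedding,
# GlobalAveragePooling1D, and Model come from tf.keras; maxlen, vocab_size, and
# all layer sizes are illustrative.
maxlen, vocab_size, embed_dim = 200, 20000, 32
inputs = Input(shape=(maxlen,))
x = Embedding(vocab_size, embed_dim)(inputs)                # (batch, maxlen, embed_dim)
x = TransformerBlock(embed_dim, num_heads=2, ff_dim=32)(x)  # shape-preserving
x = GlobalAveragePooling1D()(x)                             # pool over the sequence axis
outputs = Dense(2, activation="softmax")(x)
model = Model(inputs=inputs, outputs=outputs)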
def __init__(self, original_dim, intermediate_dim=64, name="image", **kwargs):
    super(OSIC_Image, self).__init__(name=name, **kwargs)
    self.layers = []
    self.layers.append(InputLayer(input_shape=original_dim))
    self.layers.append(
        Conv3D(filters=8, kernel_size=5, strides=3, padding="same",
               kernel_initializer=GlorotUniform(seed=0),
               input_shape=original_dim))
    self.layers.append(LayerNormalization())
    self.layers.append(Activation('elu'))
    self.layers.append(
        Conv3D(filters=16, kernel_size=2, strides=2, padding="same",
               kernel_initializer=GlorotUniform(seed=0)))
    self.layers.append(LayerNormalization())
    self.layers.append(Activation('elu'))
    self.layers.append(
        Conv3D(filters=32, kernel_size=2, strides=1, padding="same",
               kernel_initializer=GlorotUniform(seed=0)))
    self.layers.append(LayerNormalization())
    self.layers.append(Activation('elu'))
    self.layers.append(
        Conv3D(filters=64, kernel_size=2, strides=1, padding="same",
               kernel_initializer=GlorotUniform(seed=0)))
    self.layers.append(LayerNormalization())
    self.layers.append(Activation('elu'))
    # self.layers.append(Conv3DTranspose(32, 2, 1))
    # self.layers.append(LayerNormalization())
    # self.layers.append(Activation('elu'))
    # self.layers.append(Conv3DTranspose(16, 2, 1))
    # self.layers.append(LayerNormalization())
    # self.layers.append(Conv3D(filters=1, kernel_size=5, strides=4, kernel_initializer=GlorotUniform(seed=0)))
    # self.layers.append(Conv3D(filters=2, kernel_size=1, activation="softmax", kernel_initializer=GlorotUniform(seed=0)))
    self.layers.append(Dense(64))
    self.layers.append(LayerNormalization())
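# A minimal sketch of the forward pass, assuming the model simply chains the
# stored layers in order; the call signature is an assumption. Note: if this
# class subclasses tf.keras.Model, `layers` is a reserved read-only property,
# so a different attribute name (e.g. self.layer_stack) may be required.
def call(self, inputs):
    x = inputs
    for layer in self.layers:   # Conv3D -> LayerNormalization -> ELU blocks, then Dense
        x = layer(x)
    return x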
def build(self, input_shape):
    self.attn_multi = MultiAttention(self.d_k, self.d_v, self.n_heads)
    self.attn_dropout = Dropout(self.dropout_rate)
    self.attn_normalize = LayerNormalization(input_shape=input_shape, epsilon=1e-6)
    self.ff_conv1D_1 = Conv1D(filters=self.ff_dim, kernel_size=1, activation='relu')
    # input_shape[0] = (batch, seq_len, 7), so input_shape[0][-1] = 7
    self.ff_conv1D_2 = Conv1D(filters=input_shape[0][-1], kernel_size=1)
    self.ff_dropout = Dropout(self.dropout_rate)
    self.ff_normalize = LayerNormalization(input_shape=input_shape, epsilon=1e-6)
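# A minimal sketch of the corresponding call method, assuming the layer receives
# a (query, key, value) tuple (consistent with input_shape[0] in build above)
# and uses post-norm residual connections; MultiAttention's exact call signature
# is an assumption.
def call(self, inputs):
    q, k, v = inputs
    attn = self.attn_multi([q, k, v])
    attn = self.attn_dropout(attn)
    out1 = self.attn_normalize(q + attn)    # residual add + LayerNorm
    ff = self.ff_conv1D_1(out1)             # kernel_size=1 convs act position-wise
    ff = self.ff_conv1D_2(ff)               # project back to the input feature width (7)
    ff = self.ff_dropout(ff)
    return self.ff_normalize(out1 + ff)     # second residual add + LayerNorm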
def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1):
    super(TransformerBlock, self).__init__()
    self.embed_dim = embed_dim
    self.num_heads = num_heads
    self.ff_dim = ff_dim
    self.rate = rate
    self.attention = MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)
    self.ffn = Sequential(
        [Dense(ff_dim, activation="relu"), Dense(embed_dim)]
    )
    self.layernorm1 = LayerNormalization(epsilon=1e-6)
    self.layernorm2 = LayerNormalization(epsilon=1e-6)
    self.dropout1 = Dropout(rate)
    self.dropout2 = Dropout(rate)
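# Because this constructor stores every hyperparameter on self, the block can be
# made serializable. A minimal sketch following the standard Keras custom-layer
# pattern (not part of the original snippet):
def get_config(self):
    config = super().get_config()
    config.update({
        "embed_dim": self.embed_dim,
        "num_heads": self.num_heads,
        "ff_dim": self.ff_dim,
        "rate": self.rate,
    })
    return config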
def __init__(self, config, name, trainable=False):
    """Construct the embedding module from word, position and segment embeddings."""
    super().__init__(name=name, trainable=trainable)
    self.word_embeddings = Embedding(
        config.vocab_size,
        config.hidden_size,
        embeddings_initializer=tf.initializers.TruncatedNormal(stddev=0.02),
        name="word_embeddings")
    self.position_embeddings = Embedding(
        config.max_position_embeddings,
        config.hidden_size,
        embeddings_initializer=tf.initializers.TruncatedNormal(stddev=0.02),
        name="position_embeddings")
    self.segment_embeddings = Embedding(
        config.type_vocab_size,
        config.hidden_size,
        embeddings_initializer=tf.initializers.TruncatedNormal(stddev=0.02),
        name="token_type_embeddings")
    # self.LayerNorm is not snake-cased, to stick with the TensorFlow model
    # variable name and be able to load any TensorFlow checkpoint file.
    self.LayerNorm = LayerNormalization(name="LayerNorm")
    self.dropout = Dropout(config.hidden_dropout_prob)
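# A minimal sketch of the forward pass, assuming BERT's usual recipe: the three
# embeddings are summed element-wise, then LayerNorm and dropout are applied.
# The argument names are assumptions, not taken from the original snippet.
def call(self, input_ids, token_type_ids, training=False):
    seq_len = tf.shape(input_ids)[1]
    position_ids = tf.range(seq_len)                      # 0..seq_len-1, broadcast over batch
    embeddings = (self.word_embeddings(input_ids)
                  + self.position_embeddings(position_ids)
                  + self.segment_embeddings(token_type_ids))
    embeddings = self.LayerNorm(embeddings)               # normalize the summed embeddings
    return self.dropout(embeddings, training=training)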
def createEncoder(self):
    base_model = InceptionV3(input_shape=(224, 224, 3), weights=None, include_top=False)
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = LayerNormalization()(x)
    model = Model(inputs=base_model.input, outputs=x)
    return model
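# A hypothetical usage sketch (on whatever object defines createEncoder): the
# encoder maps a batch of 224x224 RGB images to LayerNorm-ed, globally pooled
# InceptionV3 features. The random input batch is purely illustrative.
encoder = self.createEncoder()
features = encoder(tf.random.normal([8, 224, 224, 3]))  # -> (8, 2048) pooled features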
def __init__(self, config, name, trainable=False):
    super().__init__(name=name, trainable=trainable)
    self.dense = Dense(
        config.hidden_size,
        input_shape=(config.intermediate_size,),
        kernel_initializer=create_initializer(config.initializer_range),
        name="dense")
    self.LayerNorm = LayerNormalization(name="LayerNorm")
    self.dropout = Dropout(config.hidden_dropout_prob)
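# A minimal sketch of the forward pass, assuming this is the standard BERT
# output sub-layer: project, apply dropout, then LayerNorm over a residual
# connection with the sub-layer input. The signature is an assumption.
def call(self, hidden_states, input_tensor, training=False):
    hidden_states = self.dense(hidden_states)
    hidden_states = self.dropout(hidden_states, training=training)
    return self.LayerNorm(hidden_states + input_tensor)   # residual add + LayerNorm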
def layer_norm_tanh(_x):
    _out = LayerNormalization()(_x)
    return Activation('tanh')(_out)
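# A hypothetical usage sketch: LayerNorm before the tanh keeps the
# pre-activation centered and scaled, so the tanh is less likely to saturate.
# `x` here is any illustrative feature tensor.
h = layer_norm_tanh(Dense(128)(x))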
def constrained_adversarial_autoencoder_Chen(z, x, dropout_rate, dropout, config):
    outputs = {}
    dim = 64

    with tf.variable_scope('Encoder'):
        encoder = Bunch({
            # Model definition
            'enc_conv': Conv2D(filters=dim, kernel_size=3, padding='same'),
            'enc_res1_conv1': Conv2D(filters=2 * dim, kernel_size=3, padding='same'),
            'enc_res1_layernorm1': LayerNormalization([1, 2]),
            'enc_res1_conv2': Conv2D(filters=2 * dim, kernel_size=3, strides=2, padding='same'),
            'enc_res1_layernorm2': LayerNormalization([1, 2]),
            'enc_res1_shortcut1': Conv2D(filters=2 * dim, kernel_size=1, padding='same'),
            'enc_res1_shortcut2': AvgPool2D(),
            'enc_res2_conv1': Conv2D(filters=4 * dim, kernel_size=3, padding='same'),
            'enc_res2_layernorm1': LayerNormalization([1, 2]),
            'enc_res2_conv2': Conv2D(filters=4 * dim, kernel_size=3, strides=2, padding='same'),
            'enc_res2_layernorm2': LayerNormalization([1, 2]),
            'enc_res2_shortcut1': Conv2D(filters=4 * dim, kernel_size=1, padding='same'),
            'enc_res2_shortcut2': AvgPool2D(),
            'enc_res3_conv1': Conv2D(filters=8 * dim, kernel_size=3, padding='same'),
            'enc_res3_layernorm1': LayerNormalization([1, 2]),
            'enc_res3_conv2': Conv2D(filters=8 * dim, kernel_size=3, strides=2, padding='same'),
            'enc_res3_layernorm2': LayerNormalization([1, 2]),
            'enc_res3_shortcut1': Conv2D(filters=8 * dim, kernel_size=1, padding='same'),
            'enc_res3_shortcut2': AvgPool2D(),
            'enc_res4_conv1': Conv2D(filters=8 * dim, kernel_size=3, padding='same'),
            'enc_res4_layernorm1': LayerNormalization([1, 2]),
            'enc_res4_conv2': Conv2D(filters=8 * dim, kernel_size=3, padding='same'),
            'enc_res4_layernorm2': LayerNormalization([1, 2]),
            'enc_flatten': Flatten(),
            'enc_dense': Dense(config.zDim),
        })
        features, z_ = evaluate_encoder(encoder, x)
        outputs['z_'] = z_

    with tf.variable_scope('Decoder'):
        decoder = Bunch({
            # Model definition
            'dec_1': Dense(np.prod(features.get_shape().as_list()[1:])),
            'dec_res1_conv1': Conv2D(filters=8 * dim, kernel_size=3, padding='same'),
            'dec_res1_layernorm1': LayerNormalization([1, 2]),
            'dec_res1_conv2': Conv2DTranspose(filters=8 * dim, kernel_size=3, padding='same'),
            'dec_res1_layernorm2': LayerNormalization([1, 2]),
            'dec_res2_conv1': Conv2D(filters=4 * dim, kernel_size=3, padding='same'),
            'dec_res2_layernorm1': LayerNormalization([1, 2]),
            'dec_res2_conv2': Conv2DTranspose(filters=4 * dim, kernel_size=3, strides=2, padding='same'),
            'dec_res2_layernorm2': LayerNormalization([1, 2]),
            'dec_res2_shortcut': Conv2DTranspose(filters=4 * dim, kernel_size=1, padding='same', strides=2),
            'dec_res3_conv1': Conv2D(filters=2 * dim, kernel_size=3, padding='same'),
            'dec_res3_layernorm1': LayerNormalization([1, 2]),
            'dec_res3_conv2': Conv2DTranspose(filters=2 * dim, kernel_size=3, strides=2, padding='same'),
            'dec_res3_layernorm2': LayerNormalization([1, 2]),
            'dec_res3_shortcut': Conv2DTranspose(filters=2 * dim, kernel_size=1, padding='same', strides=2),
            'dec_res4_conv1': Conv2D(filters=dim, kernel_size=3, padding='same'),
            'dec_res4_layernorm1': LayerNormalization([1, 2]),
            'dec_res4_conv2': Conv2DTranspose(filters=dim, kernel_size=3, strides=2, padding='same'),
            'dec_res4_layernorm2': LayerNormalization([1, 2]),
            'dec_res4_shortcut': Conv2DTranspose(filters=dim, kernel_size=1, padding='same', strides=2),
            # post-processing
            'dec_layernorm': LayerNormalization([1, 2]),
            'dec_conv': Conv2D(1, 1, padding='same'),
        })
        outputs['x_hat'] = x_hat = evaluate_decoder(decoder, z_, features.get_shape().as_list()[1:])
        # project the reconstruction back to latent space for the constrained part
        outputs['z_rec'] = evaluate_encoder(encoder, x_hat)[1]
    # Discriminator
    with tf.variable_scope('Discriminator'):
        discriminator = [
            Dense(400, activation=leaky_relu),
            Dense(200, activation=leaky_relu),
            Dense(1)
        ]

        # fake
        temp_out = z_
        for layer in discriminator:
            temp_out = layer(temp_out)
        outputs['d_'] = temp_out

        # real
        temp_out = z
        for layer in discriminator:
            temp_out = layer(temp_out)
        outputs['d'] = temp_out

        # reparametrization
        epsilon = tf.random_uniform([], 0.0, 1.0)
        outputs['z_hat'] = z_hat = epsilon * z + (1 - epsilon) * z_
        temp_out = z_hat
        for layer in discriminator:
            temp_out = layer(temp_out)
        outputs['d_hat'] = temp_out

    return outputs
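# Two hedged sketches for context. First, evaluate_encoder is not shown above;
# one plausible reading of the enc_res1_* names is a residual block
# (conv -> LayerNorm -> ReLU twice, plus a 1x1-projected, average-pooled
# shortcut). This helper and the activation choice are assumptions:
def eval_res_block_1(enc, h):
    shortcut = enc.enc_res1_shortcut2(enc.enc_res1_shortcut1(h))    # 1x1 conv, then AvgPool2D
    h = tf.nn.relu(enc.enc_res1_layernorm1(enc.enc_res1_conv1(h)))
    h = tf.nn.relu(enc.enc_res1_layernorm2(enc.enc_res1_conv2(h)))  # strided conv halves H, W
    return h + shortcut   # both paths now have 2*dim channels at half resolution

# Second, the interpolation z_hat = epsilon*z + (1-epsilon)*z_ is the standard
# WGAN-GP construction, so outputs['d_hat'] is presumably consumed by a gradient
# penalty when the losses are built from these outputs. A minimal TF1-style
# sketch; the penalty weight config.scale is hypothetical:
gradients = tf.gradients(outputs['d_hat'], [z_hat])[0]
slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=1))
gradient_penalty = tf.reduce_mean(tf.square(slopes - 1.0))          # penalize ||grad|| != 1
d_loss = (tf.reduce_mean(outputs['d_']) - tf.reduce_mean(outputs['d'])
          + config.scale * gradient_penalty)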