Example #1
    def _absDifference(self, input1, input2):
        """
        Compute the absolute value of the difference of two tensors.

        Parameters:
        - input1 -- first operand
        - input2 -- second operand
        """
        _diff1 = layers.Subtract()([input1, input2])
        _diff2 = layers.Subtract()([input2, input1])
        # max(a - b, b - a) = |a - b|; Minimum would give -|a - b|
        return layers.Maximum()([_diff1, _diff2])
def build_model():
    x1 = layers.Input(shape = (160, 160, 1))
    x2 = layers.Input(shape = (160, 160, 1))

    # shared feature extractor: its weights are reused for both inputs
    inputs = layers.Input(shape = (160, 160, 1))
    feature = layers.Conv2D(32, kernel_size = 3, padding = 'same', activation = 'relu')(inputs)
    feature = layers.MaxPooling2D(pool_size = 2)(feature)
    feature = layers.Conv2D(32, kernel_size = 3, padding = 'same', activation = 'relu')(feature)
    feature = layers.MaxPooling2D(pool_size = 2)(feature)
    feature_model = Model(inputs = inputs, outputs = feature)

    # apply the shared feature model to both inputs
    x1_net = feature_model(x1)
    x2_net = feature_model(x2)

    # subtract features
    net = layers.Subtract()([x1_net, x2_net])
    net = layers.Conv2D(32, kernel_size = 3, padding = 'same', activation = 'relu')(net)
    net = layers.MaxPooling2D(pool_size = 2)(net)
    net = layers.Flatten()(net)
    net = layers.Dense(64, activation = 'relu')(net)
    net = layers.Dense(1, activation = 'sigmoid')(net)
    model = Model(inputs = [x1, x2], outputs = net)

    # compile
    model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['acc'])

    # show summary
    model.summary()

    return model
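A minimal usage sketch for the model above (an illustration, not part of the original example), assuming `from tensorflow.keras import layers` and `from tensorflow.keras.models import Model` are in scope, with random dummy pairs labeled 1 for "same" and 0 for "different":

# Usage sketch (assumptions: tensorflow.keras in scope, random dummy data).
import numpy as np

model = build_model()
x1 = np.random.rand(8, 160, 160, 1).astype('float32')  # first image of each pair
x2 = np.random.rand(8, 160, 160, 1).astype('float32')  # second image of each pair
y = np.random.randint(0, 2, size=(8, 1))                # 1 = same, 0 = different
model.fit([x1, x2], y, epochs=1, batch_size=4)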
Example #3
    def _create_latent_space_loss(self, latent_space_1, latent_space_2):
        subtract_out = layers.Subtract()([latent_space_1, latent_space_2])
        # each Lambda operates on its own input x (the originals ignored x and closed over outer tensors)
        power_out = layers.Lambda(lambda x: backend.pow(x, 2))(subtract_out)
        sum_out = layers.Lambda(lambda x: backend.sum(x, axis=[1, 2, 3]))(power_out)
        mean_out = layers.Lambda(lambda x: backend.mean(x, axis=0))(sum_out)
        reg_out = layers.Lambda(lambda x: x * self.Regularization.latent_space_lambda)(mean_out)
        identity_out = layers.Lambda(lambda x: x, name="latent_space_loss")(reg_out)

        return identity_out
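For reference, the Lambda chain above computes latent_space_lambda * mean over the batch of the per-sample sum of squared latent differences. A compact single-Lambda sketch of the same quantity (an assumption that it runs inside the same method, with the same `backend` import):

        # Equivalent single-Lambda sketch (assumption: same backend import and weight).
        loss_out = layers.Lambda(
            lambda zs: self.Regularization.latent_space_lambda * backend.mean(
                backend.sum(backend.pow(zs[0] - zs[1], 2), axis=[1, 2, 3]), axis=0),
            name="latent_space_loss")([latent_space_1, latent_space_2])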
Example #4
    def build_model(self):
        # Conv layers.
        conv_8 = keras.layers.Conv2D(filters=256,
                                     kernel_size=[3, 3],
                                     strides=(1, 1),
                                     padding='valid',
                                     activation='relu')

        # Dense layers.
        dense_1 = keras.layers.Dense(units=1024,
                                     activation='relu',
                                     use_bias=True)
        dense_2 = keras.layers.Dense(units=1,
                                     activation='sigmoid',
                                     use_bias=True)

        # Batch norm layers.
        bn_8 = keras.layers.BatchNormalization()
        bn_9 = keras.layers.BatchNormalization()

        # Flatten layers.
        flatten_1 = keras.layers.Flatten()

        # All layers defined. Build the forward propagation.
        input_1 = layers.Input(shape=(self.img_width, self.img_height, 1),
                               name='image_input_1')
        input_2 = layers.Input(shape=(self.img_width, self.img_height, 1),
                               name='image_input_2')
        output_1 = self.feature_net(input_1)
        output_2 = self.feature_net(input_2)

        sub = layers.Subtract()([output_1, output_2])

        # |== Layer 5 ==|
        x = conv_8(sub)
        x = bn_8(x)

        # |== Layer 6 ==|
        x = flatten_1(x)
        x = dense_1(x)
        x = bn_9(x)
        out_res = dense_2(x)

        model = Model(inputs=[input_1, input_2], outputs=out_res)

        # show summary
        print('Main Model Summary')
        model.summary()

        # compile
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizers.Adam(learning_rate=1e-4),
                      metrics=['acc'])

        return model
Example #5
def build_siamese_net(encoder,
                      input_shape,
                      distance_metric='uniform_euclidean'):
    assert distance_metric in ('uniform_euclidean', 'weighted_euclidean',
                               'uniform_l1', 'weighted_l1', 'dot_product',
                               'cosine_distance')

    input_1 = layers.Input(input_shape)
    input_2 = layers.Input(input_shape)

    encoded_1 = encoder(input_1)
    encoded_2 = encoder(input_2)

    if distance_metric == 'weighted_l1':
        # This is the distance metric used in the original one-shot paper
        # https://www.cs.cmu.edu/~rsalakhu/papers/oneshot1.pdf
        embedded_distance = layers.Subtract()([encoded_1, encoded_2])
        embedded_distance = layers.Lambda(lambda x: K.abs(x))(
            embedded_distance)
        output = layers.Dense(1, activation='sigmoid')(embedded_distance)
    elif distance_metric == 'uniform_euclidean':
        # Simpler, no-bells-and-whistles euclidean distance;
        # a sigmoid activation is still applied to the distance
        embedded_distance = layers.Subtract(name='subtract_embeddings')(
            [encoded_1, encoded_2])
        # Sqrt of sum of squares
        embedded_distance = layers.Lambda(
            lambda x: K.sqrt(K.sum(K.square(x), axis=-1, keepdims=True)),
            name='euclidean_distance')(embedded_distance)
        output = layers.Dense(1, activation='sigmoid')(embedded_distance)
    elif distance_metric == 'cosine_distance':
        raise NotImplementedError
        # cosine_proximity = layers.Dot(axes=-1, normalize=True)([encoded_1, encoded_2])
        # ones = layers.Input(tensor=K.ones_like(cosine_proximity))
        # cosine_distance = layers.Subtract()([ones, cosine_proximity])
        # output = layers.Dense(1, activation='sigmoid')(cosine_distance)
    else:
        raise NotImplementedError

    siamese = Model(inputs=[input_1, input_2], outputs=output)

    return siamese
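For the unimplemented 'cosine_distance' branch, a hedged sketch (not the author's code) using Keras's normalized Dot layer, where cosine distance = 1 - cosine similarity:

    elif distance_metric == 'cosine_distance':
        # Sketch only: Dot(normalize=True) yields cosine similarity in [-1, 1].
        cosine_similarity = layers.Dot(axes=-1, normalize=True)([encoded_1, encoded_2])
        cosine_distance = layers.Lambda(lambda x: 1.0 - x)(cosine_similarity)
        output = layers.Dense(1, activation='sigmoid')(cosine_distance)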
Example #6
def model():
    input_1 = layers.Input(shape=target_size + (1, ))
    input_2 = layers.Input(shape=target_size + (1, ))

    subtract = layers.Subtract()([input_2, input_1])
    #subtract = layers.Subtract()([subtract, input_1])
    x = layers.MaxPooling3D(8)(subtract)

    model = Model(inputs=[input_1, input_2], outputs=x)

    return model
Example #7
    def get_edges(xi_xj):
        """
        Get the edge features, derived from the central point and
        the neighboring points.

        Output shape: (bs, n_nodes, n_neighbors, 2*n_features)

        """
        xi, xj = xi_xj
        dif = layers.Subtract()([xi, xj])
        x = layers.Concatenate(axis=-1)([xi, dif])
        return x
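A usage sketch for get_edges (an illustration; it assumes `xi` is the central point already tiled along the neighbor axis, so both tensors share the shape given in the docstring):

    # Usage sketch: wrap the helper in a Lambda layer inside a functional model.
    edge_features = layers.Lambda(get_edges, name='edge_features')([xi, xj])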
Example #8
	def __init__(self,
				 shared_dim,
				 quality_dim,
				 activation,
				 **kwargs):
		super(ContrastiveLayer, self).__init__(**kwargs)
		self.dense_share  = layers.Dense(shared_dim, 
										 activation=activation)
		self.dense_mean   = layers.Dense(quality_dim)
		self.dense_logstd = layers.Dense(quality_dim)
		self.sub  = layers.Subtract()
		self.clip = layers.Lambda(lambda x: K.clip(x, -20, 2))
		self.exp  = layers.Lambda(lambda x: K.exp(x))
Example #9
    def get_reflection_recovery_and_removal_block():
        def get_conv_block(f=64, k=3, s=1):
            model = keras.Sequential()
            model.add(
                layers.Conv2D(filters=f,
                              strides=s,
                              kernel_size=k,
                              padding='same'))
            model.add(layers.BatchNormalization())
            model.add(layers.LeakyReLU())
            return model

        def get_deconv_block(f=64, k=3, s=1):
            model = keras.Sequential()
            model.add(
                layers.Conv2DTranspose(filters=f,
                                       strides=s,
                                       kernel_size=k,
                                       padding='same'))
            model.add(layers.BatchNormalization())
            model.add(layers.LeakyReLU())
            return model

        inp = keras.Input(shape=(None, None, 3))
        # 256 -> 128
        conv1 = get_conv_block(f=32, s=2)(inp)
        conv2 = get_conv_block(f=32, s=1)(conv1)

        # 128 -> 64
        conv3 = get_conv_block(f=64, s=2)(conv2)
        conv4 = get_conv_block(f=64, s=1)(conv3)

        # 64 -> 32
        conv5 = get_conv_block(f=128, s=2)(conv4)
        conv6 = get_conv_block(f=128, s=1)(conv5)

        deconv6 = get_deconv_block(f=128, s=1)(conv6)
        deconv5 = get_deconv_block(f=128, s=2)(deconv6)

        concat1 = layers.Concatenate(axis=3)([conv4, deconv5])
        deconv4 = get_deconv_block(f=64, s=1)(concat1)
        deconv3 = get_deconv_block(f=64, s=2)(deconv4)

        concat2 = layers.Concatenate(axis=3)([conv2, deconv3])
        deconv2 = get_deconv_block(f=32, s=1)(concat2)
        deconv1 = get_deconv_block(f=3, s=2)(deconv2)

        r = layers.Subtract()([inp, deconv1])

        return keras.Model(inp, r)
Example #10
def get_dncnn(image_size: int = 32, image_chnls: int = 3,
  depth: int = 17, n_channels: int = 64) -> tf.keras.Model:
  """Constructs a DNCNN model.

  :param image_size: size of the images (int)
  :param image_chnls: number of channels in the input images (int)
  :param depth: depth of the network (int)
  :param n_channels: number of channels in the convolutional layers (int)
  :return: DNCNN model
  """
  inputs = layers.Input((image_size, image_size, image_chnls))
  outputs = run_dncnn(inputs, depth=depth, n_channels=n_channels)
  # residual learning: subtract the predicted noise from the noisy input
  outputs = layers.Subtract()([inputs, outputs])
  return tf.keras.Model(inputs, outputs)
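`run_dncnn` is referenced above but not shown; a minimal sketch, assuming the standard DnCNN layout (Conv+ReLU, then depth-2 Conv+BN+ReLU blocks, then a final Conv predicting the noise that the Subtract removes):

def run_dncnn(inputs, depth: int = 17, n_channels: int = 64):
  # Sketch of the missing helper under the standard-DnCNN assumption.
  x = layers.Conv2D(n_channels, 3, padding='same', activation='relu')(inputs)
  for _ in range(depth - 2):
    x = layers.Conv2D(n_channels, 3, padding='same', use_bias=False)(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
  # Predict the noise with as many channels as the input image.
  return layers.Conv2D(inputs.shape[-1], 3, padding='same')(x)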
Example #11
def kernel_nn(data, nodes=16):
    d1, d2 = data  # get xi ("central" pixel) and xj ("neighborhood" pixels)

    dif = layers.Subtract()([d1, d2])
    x = layers.Concatenate(axis=-1)([d1, dif])

    x = layers.Dense(nodes, use_bias=False, activation="relu")(x)
    x = layers.BatchNormalization()(x)

    x = layers.Dense(nodes, use_bias=False, activation="relu")(x)
    x = layers.BatchNormalization()(x)

    x = layers.Dense(nodes, use_bias=False, activation="relu")(x)
    x = layers.BatchNormalization()(x)
    return x
Example #12
def SiameseNetwork(input_shape):
    inp_1 = tf.keras.Input(shape=input_shape)
    inp_2 = tf.keras.Input(shape=input_shape)
    
    # two separate encoders: note that the branches do NOT share weights here
    encoder1 = Encoder(input_shape = (5388, 20, 3), embedding_dimension = 128)
    encoder2 = Encoder(input_shape = (5388, 20, 3), embedding_dimension = 128)

    # Encode each branch
    embeds1 = encoder1(inp_1)
    embeds2 = encoder2(inp_2)

    # Siamese network
    embedded_distance = layers.Subtract(name='subtract_embeddings')([embeds1, embeds2])
    embedded_distance1 = layers.Lambda(lambda x: tf.sqrt(tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)), name='euclidean_distance')(embedded_distance)
    siamese_out = layers.Dense(2, activation='sigmoid', name="OutputLayer")(embedded_distance1)

    #Model
    siamesemodel = tf.keras.Model(inputs=[inp_1,inp_2], outputs = siamese_out)
    return (siamesemodel, encoder1, encoder2)
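Note that encoder1 and encoder2 above have separate weights, so the branches are not tied the way Example #5 ties them; a weight-sharing sketch (assuming the same Encoder constructor) reuses a single instance:

    # Weight-sharing variant (sketch; assumes the Encoder constructor above).
    encoder = Encoder(input_shape = (5388, 20, 3), embedding_dimension = 128)
    embeds1 = encoder(inp_1)  # both branches now share one set of weights
    embeds2 = encoder(inp_2)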
Example #13
def model(embedding_size, field_vocab_size=[], hidden_units=[4,4,4], dropout=0.5):
    F = len(field_vocab_size)

    # prepare embeddings
    inputs = []
    embed_list = []
    embed_one_list = []
    for i, vocab_size in enumerate(field_vocab_size):
        in_ = keras.Input(shape=(1,))
        inputs.append(in_)
        embed_list.append(layers.Embedding(vocab_size, embedding_size, input_length=1)(in_))
        embed_one_list.append(layers.Embedding(vocab_size, 1, input_length=1)(in_))
    embed_list = layers.concatenate(embed_list, axis=1) # none, F, K

    fm_first_in = layers.concatenate(embed_one_list, axis=1) # None, F, 1
    fm_first_in = layers.Lambda(lambda x: backend.squeeze(x, axis=2))(fm_first_in)

    # dense layer
    dropouts = [dropout] * len(hidden_units)
    weight_init = keras.initializers.glorot_uniform()

    deep_in = layers.Reshape((F*embedding_size,))(embed_list)
    for i, (h, d) in enumerate(zip(hidden_units, dropouts)):
        z = layers.Dense(units=h, kernel_initializer=weight_init)(deep_in)
        z = layers.BatchNormalization(axis=-1)(z)
        z = layers.Activation("relu")(z)
        z = layers.Dropout(d, seed=i)(z) if d > 0 else z  # seed must be an int (d * i was a float)
    deep_out = layers.Dense(units=1, kernel_initializer=weight_init)(z)
    # deep_out: None, 1 -- linear output; softmax over a single unit is always 1.0

    # fm layer
    fm_first_order = layers.Lambda(lambda x: backend.sum(x, axis=1))(fm_first_in) #None, 1

    emb_sum_squared = layers.Lambda(lambda x: backend.square(backend.sum(x, axis=1)))(embed_list) #none, K
    emb_squared_sum = layers.Lambda(lambda x: backend.sum(backend.square(x), axis=1))(embed_list) #none, K
    fm_second_order = layers.Subtract()([emb_sum_squared, emb_squared_sum])
    fm_second_order = layers.Lambda(lambda x: backend.sum(x, axis=1))(fm_second_order) #none, 1
    fm_out = layers.Add()([fm_first_order, fm_second_order])

    out = layers.Add()([deep_out, fm_out])
    out = layers.Activation(activation='sigmoid')(out)
    model = keras.Model(inputs=inputs, outputs=out)
    return model
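The Subtract above exploits the factorization-machine identity (sum of embeddings, squared) minus (sum of squared embeddings) = 2 * sum over pairs i<j of v_i * v_j; note the standard FM formulation also multiplies this term by 1/2. A quick numeric check (illustration only, assuming numpy):

import numpy as np

v = np.array([1.0, 2.0, 3.0])  # toy embedding values for one latent dimension
lhs = np.sum(v) ** 2 - np.sum(v ** 2)  # 36 - 14 = 22
rhs = 2 * sum(v[i] * v[j] for i in range(3) for j in range(i + 1, 3))  # 2 * 11 = 22
assert np.isclose(lhs, rhs)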
Example #14
    def build_model(self):
        input = keras.Input(shape=self.state_size)
        x = layers.Conv2D(32, 8, strides=(4, 4), activation="relu")(input)
        x = layers.Conv2D(64, 4, strides=(2, 2), activation="relu")(x)
        #x = layers.Conv2D(64, 3, strides=(2,2), activation="relu")(x)
        x = layers.Flatten()(x)
        value_fc = layers.Dense(512, activation="relu")(x)
        value = layers.Dense(1)(value_fc)
        advantage_fc = layers.Dense(512, activation="relu")(x)
        advantage = layers.Dense(NUM_ACTIONS)(advantage_fc)
        advantage_mean = layers.Lambda(
            lambda a: tf.reduce_mean(a, axis=1, keepdims=True))(advantage)
        advantage_norm = layers.Subtract()([advantage, advantage_mean])
        aggregation = layers.Add()([value, advantage_norm])
        output = layers.Dense(NUM_ACTIONS)(aggregation)

        model = keras.Model(input, output)
        model.compile(optimizer=keras.optimizers.Adam(self.learning_rate),
                      loss=self.PER_loss())
        return model
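The Subtract/Add pair above implements the dueling aggregation Q(s, a) = V(s) + (A(s, a) - mean over actions of A(s, a)) from the dueling-DQN paper (https://arxiv.org/pdf/1511.06581.pdf); a minimal numeric sketch (illustration only, assuming numpy):

import numpy as np

value = np.array([[1.0]])                # V(s), shape (batch, 1)
advantage = np.array([[2.0, 4.0, 6.0]])  # A(s, a), shape (batch, n_actions)
q = value + (advantage - advantage.mean(axis=1, keepdims=True))
print(q)  # [[-1.  1.  3.]] -- mean-centred advantages shifted by the state value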
Example #15
    def build_model(self):
        # shared_model = build_stacked_rnn_model(self)
        shared_model = self.base_network_build_fn()

        # shared_model = build_multi_attention_model(self)
        t_status = shared_model.output

        # for the attack action, use a base scalar plus a zero-mean advantage vector
        value = layers.Dense(1)(t_status)
        # broadcasting handles this directly; no concatenation is needed
        # value = concatenate([value] * self.action_num)
        a = layers.Dense(self.action_num)(t_status)
        mean = layers.Lambda(lambda x: K.mean(x, axis=1, keepdims=True))(a)
        advantage = layers.Subtract()([a, mean])
        q = layers.Add()([value, advantage])
        model = Model(shared_model.input, q, name=self.model_type)

        model.compile(optimizer=Adam(learning_rate=self.lr), loss='mse')

        return model
Example #16
def build_q_network(n_actions: int, input_shape: Tuple[int, int] = (84, 84), history_length: int = 4):
    model_input = layers.Input(shape=(input_shape[0], input_shape[1], history_length))
    x = layers.Lambda(lambda layer: layer / 255)(model_input)  # normalize by 255

    x = layers.Conv2D(32, (8, 8), strides=4, kernel_initializer=VarianceScaling(scale=2.), activation='relu', use_bias=False)(x)
    x = layers.Conv2D(64, (4, 4), strides=2, kernel_initializer=VarianceScaling(scale=2.), activation='relu', use_bias=False)(x)
    x = layers.Conv2D(64, (3, 3), strides=1, kernel_initializer=VarianceScaling(scale=2.), activation='relu', use_bias=False)(x)
    x = layers.Conv2D(1024, (7, 7), strides=1, kernel_initializer=VarianceScaling(scale=2.), activation='relu', use_bias=False)(x)

    # Split into value and advantage streams
    val_stream, adv_stream = layers.Lambda(lambda w: tf.split(w, 2, 3))(x)  # custom splitting layer

    val_stream = layers.Flatten()(val_stream)
    val = layers.Dense(1, kernel_initializer=VarianceScaling(scale=2.))(val_stream)

    adv_stream = layers.Flatten()(adv_stream)
    adv = layers.Dense(n_actions, kernel_initializer=VarianceScaling(scale=2.))(adv_stream)

    # Combine streams into Q-Values
    reduce_mean = layers.Lambda(lambda w: tf.reduce_mean(w, axis=1, keepdims=True))  # custom layer for reduce mean

    q_vals = layers.Add()([val, layers.Subtract()([adv, reduce_mean(adv)])])

    # Build model
    model = Model(model_input, q_vals)
    # model.compile(Adam(learning_rate), loss=tf.keras.losses.Huber())

    return model

# # Read more about the dueling architecture
# class DuelingDQN(Model):
#     """
#     This is an implementation of the Dueling Network proposed in https://arxiv.org/pdf/1511.06581.pdf
#     """
#     def __init__(self, n_action: int, input_shape: Tuple[int], history_length: int = 4) -> None:
#         super(DuelingDQN, self).__init__()
        
#         self._input_shape = input_shape
#         self.history_length = history_length
        
#         # Define convolutional layers
#         self.convolution = Sequential([
#             layers.Lambda(lambda inputs: inputs / 255.), # Normalize input 
#             layers.Conv2D(32, kernel_size=(8, 8), strides=4, activation='relu'),
#             layers.Conv2D(64, kernel_size=(4, 4), strides=2, activation='relu'),
#             layers.Conv2D(64, kernel_size=(3, 3), strides=1, activation='relu'),
#             layers.Conv2D(1024, kernel_size=(7, 7), strides=1, activation='relu')
#         ])
        
#         # split between value_stream and advantage_stream
#         self.split = layers.Lambda(lambda w: split(w, 2, 3))
        
#         # Advantage stream forward
#         self.advantage_stream = Sequential([
#             layers.Flatten(),
#             layers.Dense(n_action, kernel_initializer=VarianceScaling(scale=2.))
#         ])
        
#         # Value stream forward
#         self.value_stream = Sequential([
#             layers.Flatten(),
#             layers.Dense(1, kernel_initializer=VarianceScaling(scale=2.))
#         ])
        
#         # Putting all together
#         self.reduce_mean = layers.Lambda(lambda w: reduce_mean(w, axis=1, keepdims=True))
#         self.subtract = layers.Subtract()
#         self.outputs = layers.Add()
        
#     def call(self, inputs: Tensor) -> Tensor:
#         x = layers.Input(shape=(self._input_shape[0], self._input_shape[1], self.history_length))(inputs)
#         x = self.convolution(x)
        
#         value_stream, advantage_stream = self.split(x)
#         value_stream = self.value_stream(value_stream)
#         advantage_stream = self.advantage_stream(advantage_stream)
#         subtracted = self.subtract([advantage_stream, self.reduce_mean(advantage_stream)])
        
#         return self.outputs([value_stream, subtracted])
        
Example #17
# CNN 1 - left channel
in1 = layers.Input(shape=(time_sound, nfreqs,
                          1))  # define input (rows, columns, channels (only one in my case))
model_l_conv1 = layers.Conv2D(32, (5, 5), activation='relu', padding='same')(
    in1)  # define first layer and input to the layer
model_l_conv1_mp = layers.MaxPooling2D(pool_size=(1, 2))(model_l_conv1)
model_l_conv1_mp_do = layers.Dropout(0.2)(model_l_conv1_mp)

# CNN 1 - right channel
in2 = layers.Input(shape=(time_sound, nfreqs, 1))  # define input
model_r_conv1 = layers.Conv2D(32, (5, 5), activation='relu', padding='same')(
    in2)  # define first layer and input to the layer
model_r_conv1_mp = layers.MaxPooling2D(pool_size=(1, 2))(model_r_conv1)
model_r_conv1_mp_do = layers.Dropout(0.2)(model_r_conv1_mp)

# CNN 2 - merged
model_final_merge = layers.Subtract()(
    [model_l_conv1_mp_do, model_r_conv1_mp_do])
model_final_conv1 = layers.Conv2D(32, (5, 5),
                                  activation='relu',
                                  padding='same')(model_final_merge)
model_final_conv1_mp = layers.MaxPooling2D(pool_size=(2, 2))(model_final_conv1)
model_final_conv1_mp_do = layers.Dropout(0.2)(model_final_conv1_mp)

# CNN 3 - merged
model_final_conv2 = layers.Conv2D(64, (5, 5),
                                  activation='relu',
                                  padding='same')(model_final_conv1_mp_do)
model_final_conv2_mp = layers.MaxPooling2D(pool_size=(2, 2))(model_final_conv2)
model_final_conv2_mp_do = layers.Dropout(0.2)(model_final_conv2_mp)

# CNN 4 - merged
model_final_conv3 = layers.Conv2D(128, (5, 5),
                                  activation='relu',
                                  padding='same')(model_final_conv2_mp_do)
Example #18
dropout = 0.01
activation = "relu"
optimizer = 'Adam'
loss = 'categorical_crossentropy'
#metrics= ['acc']
metrics = ['categorical_accuracy']
class_mode = 'categorical'
batch_size = 200
epochs = 100
verbose = 2
lr = 0.0001

# Inputs: two volumes of shape target_size + (1,), i.e. a single channel each
img_input1 = layers.Input(shape=target_size + (1, ))
img_input2 = layers.Input(shape=target_size + (1, ))
img_input = layers.Subtract()([img_input2, img_input1])
net = layers.Conv3D(kernel_size,
                    conv_window,
                    activation=activation,
                    padding='valid',
                    strides=4)(img_input)
x = layers.MaxPooling3D(pooling_window)(net)
x = layers.Conv3D(2 * kernel_size,
                  conv_window,
                  activation=activation,
                  padding='same',
                  strides=1)(x)
x = layers.MaxPooling3D(pooling_window)(x)
x = layers.Conv3D(6 * kernel_size,
                  conv_window,
                  activation=activation,
                  padding='same',
                  strides=1)(x)
Example #19
    def build(self, name: str, show_summary=False):
        input_layer = layers.Input(shape=(*Constants.FRAME_SHAPE,
                                          self.actions),
                                   name='input_layer')
        scaling_layer = layers.Lambda(lambda layer: layer / 255,
                                      name="scale")(input_layer)

        # Use the same model as the paper
        # "The first convolutional layer has 32 8x8 filters with stride 4" and applies a rectifier nonlinearity
        hidden_layer1 = layers.Conv2D(
            filters=32,
            kernel_size=(8, 8),
            strides=4,
            activation=tf.nn.relu,
            kernel_initializer=keras.initializers.VarianceScaling(scale=2),
            padding="valid",
            use_bias=False,
            name='hidden_layer1')(scaling_layer)
        # "The second convolves 64 4×4 filters with stride 2" followed by a rectifier nonlinearity
        hidden_layer2 = layers.Conv2D(
            filters=64,
            kernel_size=(4, 4),
            strides=2,
            activation=tf.nn.relu,
            kernel_initializer=keras.initializers.VarianceScaling(scale=2),
            padding="valid",
            use_bias=False,
            name='hidden_layer2')(hidden_layer1)
        # "The third and final convolution layer consists 64 3x3 filters with stride 1" followed by a rectifier
        hidden_layer3 = layers.Conv2D(
            filters=64,
            kernel_size=(3, 3),
            strides=1,
            activation=tf.nn.relu,
            kernel_initializer=keras.initializers.VarianceScaling(scale=2),
            padding="valid",
            use_bias=False,
            name='hidden_layer3')(hidden_layer2)

        # It is recommended to use another convolution layer instead of 2 separate fully connected layers with 512 units
        hidden_layer4 = layers.Conv2D(
            filters=1024,
            kernel_size=(7, 7),
            strides=1,
            activation=tf.nn.relu,
            kernel_initializer=keras.initializers.VarianceScaling(scale=2),
            padding="valid",
            use_bias=False,
            name='hidden_layer4')(hidden_layer3)

        # Split into value and advantage streams
        value_stream, advantage_stream = layers.Lambda(
            lambda layer: tf.split(layer, 2, 3), name="split")(hidden_layer4)

        # Flatten each stream
        flattened_value_stream = layers.Flatten(
            name="value_flattened")(value_stream)
        flattened_advantage_stream = layers.Flatten(
            name="advantage_flattened")(advantage_stream)

        # Create value stream output layer: 1 output
        value_output_layer = layers.Dense(
            1,
            kernel_initializer=keras.initializers.VarianceScaling(scale=2),
            name="value_output")(flattened_value_stream)

        # Create advantage stream output layer: output per action
        advantage_output_layer = layers.Dense(
            self.actions, name="advantage_output")(flattened_advantage_stream)

        # Q value given an action and state =
        # value output of state +
        # (advantage output given an action and state - reduction mean of advantage output over all actions)

        # reduction mean of advantage output over all actions
        reduce_mean_layer = layers.Lambda(
            lambda layer: tf.reduce_mean(layer, axis=1, keepdims=True),
            name="reduce_mean")

        # (advantage output given an action and state - reduction mean of advantage output over all actions)
        subtract_layer = layers.Subtract(name="q_sub")([
            advantage_output_layer,
            reduce_mean_layer(advantage_output_layer)
        ])

        # Q values given action and state
        q_values = layers.Add(name="q_output")(
            [value_output_layer, subtract_layer])

        # Build model using Huber loss and Adam optimizer
        model = tf.keras.models.Model(input_layer, q_values, name=name)
        model.compile(optimizer=keras.optimizers.Adam(
            learning_rate=HyperParams.LEARNING_RATE),
                      loss=tf.keras.losses.Huber())

        if show_summary:
            # Show Summary
            model.summary()

        return model