Example #1
def cycle_classifier(input_shape=(128, 128, 1)):
    noisy_to_clean_model = generator(transform_repeater=16, name='noisy2clean')
    clean_to_noisy_model = generator(transform_repeater=16, name='clean2noisy')

    low_pass_filter_model = lpf()

    noisy_input_layer = Input(shape=input_shape, name='noisy_input')
    blurry_noisy_input = low_pass_filter_model(noisy_input_layer)
    noisy_lpf_output_layer = low_pass_filter_model(
        clean_to_noisy_model(
            noisy_to_clean_model(noisy_input_layer)))  # LPF is applied
    noisy_to_noisy_lpf_model = Model(noisy_input_layer,
                                     noisy_lpf_output_layer,
                                     name='noisy2noisyLPF')

    clean_input_layer = Input(shape=input_shape, name='clean_input')
    clean_output_layer = noisy_to_clean_model(
        clean_to_noisy_model(clean_input_layer))
    clean_to_clean_model = Model(clean_input_layer,
                                 clean_output_layer,
                                 name='clean2clean')

    should_be_zero_lpf_difference_layer = Subtract(name='lpf_difference')(
        [noisy_lpf_output_layer, blurry_noisy_input])
    should_be_zero_clear_difference_layer = Subtract(name='clear_difference')(
        [clean_input_layer, clean_output_layer])

    cycle_training_model = Model(inputs=[noisy_input_layer, clean_input_layer],
                                 outputs=[
                                     should_be_zero_lpf_difference_layer,
                                     should_be_zero_clear_difference_layer
                                 ],
                                 name='cycle_training')

    return (cycle_training_model, noisy_to_clean_model, clean_to_noisy_model)
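Both outputs of cycle_training_model are residuals that should converge to zero, so the model can be fit against all-zero targets. A minimal usage sketch (not part of the original source; it assumes the generator and lpf builders referenced above are available, and noisy_patches / clean_patches are hypothetical arrays of shape (N, 128, 128, 1)):

import numpy as np

cycle_model, noisy2clean, clean2noisy = cycle_classifier()
cycle_model.compile(optimizer='adam', loss='mae')

zero_targets = np.zeros_like(noisy_patches)  # both outputs are "should be zero" maps
cycle_model.fit([noisy_patches, clean_patches],
                [zero_targets, zero_targets],
                epochs=10)
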
Example #2
 def build_model(self):
     outputhelper = []
     for j in range(self.N):
         strategy = self.price
         strategyeval = self.tradeeval
         for k in range(self.d):
             strategy = self.layers[k + (j) * self.d](strategy)  # strategy at j is the alpha at j
             strategyeval = self.layers[k + (j) * self.d](strategyeval)
         incr = Input(shape=(self.m,))
         logprice = Lambda(lambda x: K.log(x))(self.price)
         logprice = Add()([logprice, incr])
         pricenew = Lambda(lambda x: K.exp(x))(logprice)
         self.price = pricenew
         logwealth = Lambda(lambda x: K.log(x))(self.wealth)
         logwealth = Lambda(lambda x: x + self.r * self.T / self.N)(logwealth)
         helper1 = Multiply()([strategy, incr])
         # helper1 = Lambda()(lambda x : K.sum(x,axis=1))([helper1])
         logwealth = Add()([logwealth, helper1])
         helper2 = Multiply()([strategy, strategy])
         # helper2 = Lambda()(lambda x : K.sum(x,axis=1))([helper1])
         helper3 = Lambda(lambda x: x * self.sigma ** 2 / 2 * self.T / self.N)(helper2)
         logwealth = Subtract()([logwealth, helper3])
         helper4 = Lambda(lambda x: x * self.r * self.T / self.N)(strategy)
         logwealth = Subtract()([logwealth, helper4])
         wealthnew = Lambda(lambda x: K.exp(x))(logwealth)  # creating the wealth at time j+1
         self.inputs = self.inputs + [incr]
         outputhelper = outputhelper + [strategyeval]  # here we collect the strategies
         self.wealth = wealthnew
     self.outputs = self.wealth
     randomendowment = Lambda(lambda x: -0.0 * (K.abs(x - 1.0) + x - 1.0))(self.price)
     self.outputs = Add()([self.wealth, randomendowment])
     self.outputs = [self.outputs] + outputhelper
     self.outputs = Concatenate()(self.outputs)
     # Now return the model
     return Model(inputs=self.inputs, outputs=self.outputs)
Example #3
    def build_generator(self, pyramid_features=256, head_features=256):

        # Image input
        obs = Input(shape=self.img_shape)
        ren = Input(shape=self.img_shape)
        #real = Input(shape=self.img_shape)

        model_obs = self.backbone_obs(obs)
        model_ren = self.backbone_ren(ren)

        diff_P3 = Subtract()([model_obs[0], model_ren[0]])
        diff_P4 = Subtract()([model_obs[1], model_ren[1]])
        diff_P5 = Subtract()([model_obs[2], model_ren[2]])

        fusion_py = self.PFPN(diff_P3, diff_P4, diff_P5)

        head_tra = Conv2D(head_features,
                          kernel_size=3,
                          strides=1,
                          padding='same')(fusion_py)
        head_tra = Conv2D(head_features,
                          kernel_size=3,
                          strides=1,
                          padding='same')(head_tra)
        head_tra = Conv2D(head_features,
                          kernel_size=3,
                          strides=1,
                          padding='same')(head_tra)
        delta_tra = Conv2D(3, kernel_size=3)(head_tra)

        head_rot = Conv2D(head_features,
                          kernel_size=3,
                          strides=1,
                          padding='same')(fusion_py)
        head_rot = Conv2D(head_features,
                          kernel_size=3,
                          strides=1,
                          padding='same')(head_rot)
        head_rot = Conv2D(head_features,
                          kernel_size=3,
                          strides=1,
                          padding='same')(head_rot)
        delta_rot = Conv2D(4, kernel_size=3)(head_rot)
        print(delta_rot)
        #delta_rot = l2_normalize(delta_rot, axis=-1)

        delta = Concatenate(axis=-1)([delta_tra, delta_rot])

        #da_obs = self.backbone_obs(real)
        #da_ren = self.backbone_ren(real)
        #da_out_obs = Conv2D(4, kernel_size=3)(da_obs)
        #da_out_ren = Conv2D(4, kernel_size=3)(da_ren)
        #da_act_obs = Activation('sigmoid')(da_out_obs)
        #da_act_ren = Activation('sigmoid')(da_out_ren)
        #da_out = Concatenate()([da_act_obs, da_act_ren])

        #return Model(inputs=[obs, ren, real], outputs=[delta, da_out])

        return Model(inputs=[obs, ren], outputs=delta)
Example #4
def TriangleModel(init_shape, feature_shape, feature_model, block_PD_to_T2,
                  block_T2_to_T1, block_T1_to_PD, reconstruction_model,
                  n_layers):

    inputT1 = Input(shape=init_shape)
    inputT2 = Input(shape=init_shape)
    inputPD = Input(shape=init_shape)

    fT1 = feature_model(inputT1)
    fT2 = feature_model(inputT2)
    fPD = feature_model(inputPD)

    forward_PD_to_T2 = build_forward_model(init_shape=feature_shape,
                                           block_model=block_PD_to_T2,
                                           n_layers=n_layers)
    forward_T2_to_T1 = build_forward_model(init_shape=feature_shape,
                                           block_model=block_T2_to_T1,
                                           n_layers=n_layers)
    forward_T1_to_PD = build_forward_model(init_shape=feature_shape,
                                           block_model=block_T1_to_PD,
                                           n_layers=n_layers)

    predT2 = reconstruction_model(forward_PD_to_T2(fPD))
    predT1 = reconstruction_model(forward_T2_to_T1(fT2))
    predPD = reconstruction_model(forward_T1_to_PD(fT1))

    errT2 = Subtract()([inputT2, predT2])
    errT2 = Lambda(lambda x: K.abs(x))(errT2)
    errT2 = GlobalAveragePooling3D()(errT2)
    errT2 = Reshape((1, ))(errT2)

    errT1 = Subtract()([inputT1, predT1])
    errT1 = Lambda(lambda x: K.abs(x))(errT1)
    errT1 = GlobalAveragePooling3D()(errT1)
    errT1 = Reshape((1, ))(errT1)

    errPD = Subtract()([inputPD, predPD])
    errPD = Lambda(lambda x: K.abs(x))(errPD)
    errPD = GlobalAveragePooling3D()(errPD)
    errPD = Reshape((1, ))(errPD)

    errsum = Add()([errT2, errT1, errPD])

    model = Model(inputs=[inputT1, inputT2, inputPD], outputs=errsum)

    ##  model = Model(inputs=[inputT1,inputT2,inputPD], outputs=errsum)
    #  model = Model(inputs=inputT1, outputs=errsum)

    return model
Example #5
 def _build_model(input_shape, hidden_layer_sizes, activation):
     """
     Build Keras Ranker NN model (Ranknet / LambdaRank NN).
     """
     # Neural network structure
     hidden_layers = []
     for i in range(len(hidden_layer_sizes)):
         hidden_layers.append(
             Dense(hidden_layer_sizes[i],
                   activation=activation[i],
                   name=str(activation[i]) + '_layer' + str(i)))
     h0 = Dense(1, activation='linear', name='Identity_layer')
     input1 = Input(shape=(input_shape, ), name='Input_layer1')
     input2 = Input(shape=(input_shape, ), name='Input_layer2')
     x1 = input1
     x2 = input2
     for i in range(len(hidden_layer_sizes)):
         x1 = hidden_layers[i](x1)
         x2 = hidden_layers[i](x2)
     x1 = h0(x1)
     x2 = h0(x2)
     # Subtract layer
     subtracted = Subtract(name='Subtract_layer')([x1, x2])
     # sigmoid
     out = Activation('sigmoid', name='Activation_layer')(subtracted)
     # build model
     model = Model(inputs=[input1, input2], outputs=out)
     return model
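A usage sketch for the pairwise ranker (an illustration, not from the original source, and it assumes the builder is callable as a plain function): sigmoid(s1 - s2) is trained as the probability that the first item should rank above the second, so the training data are feature-vector pairs with binary labels.

import numpy as np

model = _build_model(input_shape=10,
                     hidden_layer_sizes=[64, 32],
                     activation=['relu', 'relu'])
model.compile(optimizer='adam', loss='binary_crossentropy')

# hypothetical pairs: label 1 means the first item should outrank the second
X1 = np.random.rand(1000, 10)
X2 = np.random.rand(1000, 10)
y = (X1.sum(axis=1) > X2.sum(axis=1)).astype('float32')
model.fit([X1, X2], y, epochs=5, batch_size=32)
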
Example #6
    def __output_layer(self, connected_layer):
        if self.network_type == 'val-adv':
            # Reference: https://www.reddit.com/r/reinforcementlearning/comments/bu02ej/help_with_dueling_dqn/
            # Value & Advantage Layer
            val = Dense(1,
                        kernel_initializer='he_uniform',
                        kernel_constraint=max_norm(5),
                        name='Value')(connected_layer)
            val = Activation('linear')(val)
            adv = Dense(self.action_size,
                        kernel_initializer='he_uniform',
                        kernel_constraint=max_norm(5),
                        name='Advantage')(connected_layer)
            adv = Activation('linear')(adv)

            # Output layer
            mean = Lambda(lambda x: K.mean(x, axis=1, keepdims=True),
                          name='Mean')(adv)
            adv = Subtract(name='Advantage_Mean')([adv, mean])
            outputs = Add(name='Value_Advantage')([val, adv])

        else:
            outputs = Dense(self.action_size,
                            kernel_initializer='he_uniform',
                            kernel_constraint=max_norm(5),
                            name='Output')(connected_layer)
            outputs = Activation('linear')(outputs)

        return outputs
Example #7
    def dueling_build_model(input_dimension, output_dimension, nodes_per_layer, hidden_layer_count, learning_rate):
        inputs = Input(shape=(input_dimension,))

        # Build Advantage layer
        advantage_hidden_layer = inputs
        for _ in range(hidden_layer_count):
            advantage_hidden_layer = Dense(nodes_per_layer, activation='relu')(advantage_hidden_layer)
        predictions_advantage = Dense(output_dimension, activation='linear')(advantage_hidden_layer)

        # Build Value layer
        value_hidden_layer = inputs
        for _ in range(hidden_layer_count):
            value_hidden_layer = Dense(nodes_per_layer, activation='relu')(value_hidden_layer)
        predictions_value = Dense(1, activation='linear')(value_hidden_layer)

        # Combine layers
        advantage_average = Lambda(mean)(predictions_advantage)

        advantage = Subtract()([predictions_advantage, advantage_average])

        predictions = Add()([advantage, predictions_value])

        model = Model(inputs=inputs, outputs=predictions)
        model.compile(optimizer=Adam(lr=learning_rate), loss=Huber())
        return model
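The mean helper passed to Lambda above is not shown in this snippet; a plausible definition (an assumption) is a per-sample mean over the action axis, kept as a broadcastable column so that Subtract() receives compatible shapes:

from tensorflow.keras import backend as K

def mean(x):
    # average the advantage values over the action axis, keeping dims for broadcasting
    return K.mean(x, axis=1, keepdims=True)
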
Example #8
def get_model():
	sentence_embedding_input 	= tf.keras.Input(shape = (LSTM_NUM_TIMESTEPS, LSTM_INPUT_DIM,), dtype = tf.float32)
	video_global_features 		= tf.keras.Input(shape = (VID_GLOBAL_FEAT,), dtype = tf.float32)
	video_local_features 		= tf.keras.Input(shape = (VID_LOCAL_FEAT,), dtype = tf.float32)
	video_temporal_features 	= tf.keras.Input(shape = (VID_TEMPORAL_FEAT,), dtype = tf.float32)

	# Sentence network
	lstm_1 = LSTM(LSTM_HIDDEN_UNITS, return_sequences = True, return_state = True)
	seq_out, hidden_out, carry_out = lstm_1(sentence_embedding_input)
	sentence_out 	= Dense(DENSE_OUTPUT_FEAT, activation=tf.nn.softmax)(hidden_out)
	sentence_out_norm = BatchNormalization()(sentence_out)

	# Video feature network
	merged_features 	= Concatenate()([video_global_features, video_local_features, video_temporal_features])
	dense_1				= Dense(DENSE_LAYER_1, activation=tf.nn.softmax)(merged_features)
	dense_1_norm 		= BatchNormalization()(dense_1)
	relu_1 				= ReLU()(dense_1_norm)
	vid_feat_out 		= Dense(DENSE_OUTPUT_FEAT, activation=tf.nn.softmax)(relu_1)
	vid_feat_out_norm 	= BatchNormalization()(vid_feat_out)

	# Loss computation
	subtract_1 		= Subtract()([vid_feat_out_norm, sentence_out_norm])

	model = Model(inputs  = [video_temporal_features, video_global_features, video_local_features, sentence_embedding_input], 
				  outputs = subtract_1)	
	return model
Example #9
    def create_model(self, input_shape, output_num):
        # This returns a tensor
        inputs = Input(shape=input_shape)

        # a layer instance is callable on a tensor, and returns a tensor
        x = Conv2D(12, (4, 4), input_shape=input_shape, data_format="channels_last", padding='same', activation='relu', kernel_initializer='he_normal')(inputs)
        x = Conv2D(24, (4, 4), padding='same', activation='relu', kernel_initializer='he_normal')(x)
        x = Conv2D(48, (4, 4), padding='same', activation='relu', kernel_initializer='he_normal')(x)
        x = Flatten()(x)
        x = Dense(64, activation='relu', kernel_initializer='he_normal')(x)
        x = Dense(48, activation='relu', kernel_initializer='he_normal')(x)

        value = Dense(32, activation='relu', kernel_initializer='he_normal')(x)
        value = Dense(1, activation=self.fin_activation, kernel_initializer='he_normal')(value)

        advantage = Dense(32, activation='relu', kernel_initializer='he_normal')(x)
        advantage = Dense(output_num, activation=self.fin_activation, kernel_initializer='he_normal')(advantage)
        mean = Lambda(lambda x: K.mean(x, axis=1, keepdims=True))(advantage)
        advantage = Subtract()([advantage, mean])

        predictions = Add()([value, advantage])

        # This creates a model that includes
        #  the Input layer and the stacked output layers
        model = FuncModel(inputs=inputs, outputs=predictions)
        # model.compile happens in baseclass method compile_model()
        return model
Example #10
    def __init__(self, depth):
        super().__init__()

        self.model_layers = []
        self.model_layers.append(
            ZeroPadding2D(padding=15, data_format='channels_last'))
        self.model_layers.append(
            Conv2D(filters=64,
                   kernel_size=(3, 3),
                   strides=(1, 1),
                   kernel_initializer='Orthogonal',
                   padding='same',
                   activation='relu'))
        for i in range(depth - 2):
            self.model_layers.append(
                Conv2D(filters=64,
                       kernel_size=(3, 3),
                       strides=(1, 1),
                       kernel_initializer='Orthogonal',
                       padding='same',
                       use_bias=False))
            self.model_layers.append(BatchNormalization())
            self.model_layers.append(Activation('relu'))

        self.model_layers.append(
            Conv2D(filters=3,
                   kernel_size=(3, 3),
                   strides=(1, 1),
                   kernel_initializer='Orthogonal',
                   padding='same',
                   use_bias=False))
        self.model_layers.append(
            Cropping2D(cropping=15, data_format='channels_last'))

        self.subtract = Subtract()
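The module above only defines its layer stack and a Subtract layer; a plausible forward pass (an assumption, since the original call method is not shown) is residual denoising in the DnCNN style, where the stack predicts the noise and the prediction is subtracted from the input:

    def call(self, inputs):
        x = inputs
        for layer in self.model_layers:
            x = layer(x)                       # predict the noise residual
        return self.subtract([inputs, x])      # clean estimate = input - predicted noise
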
Example #11
def Dehaze(img_shape=(256, 256, 3)):

    img_input = Input(img_shape, name='img_input')
    trans = transmission_map_generator(img_shape)(img_input)
    atmos = atmospheric_light_generator(img_shape)(img_input)

    # $trans_{reciprocal} = \frac{1}{trans + 10^{-10}}$
    trans_reciprocal = Lambda(
        function=lambda x: 1 / (K.abs(x) + 10**-10))(trans)

    atmos = compose(
        AvgPool2D(),
        LeakyReLU(0.2),
        UpSampling2D()
    )(atmos)

    # $dehaze = (input - atmos) \times trans^{-1} + atmos$
    dehaze = Subtract()([img_input, atmos])
    dehaze = Multiply()([dehaze, trans_reciprocal])
    dehaze = Add()([dehaze, atmos])

    dehaze = compose(
        Concatenate(),
        Conv2D(6, kernel_size=3, strides=1, padding='same'),
        LeakyReLU(alpha=0.2),
        Conv2D(20, kernel_size=3, strides=1, padding='same'),
        LeakyReLU(alpha=0.2),
        Concat_Samping_Block([32, 16, 8, 4], kernel_size=1),
        Conv2D(3, kernel_size=3, strides=1, padding='same'),
        Activation('tanh')
    )([dehaze, img_input])

    return Model(inputs=[img_input], outputs=[dehaze, trans, atmos])
Example #12
    def create_d3qn_model(self):
        # TODO: Add LSTM
        input = Input(self.input_dim)
        out = Conv2D(data_format='channels_last',
                     filters=256,
                     kernel_size=(3, 3),
                     strides=(2, 2),
                     activation='relu')(input)
        out = MaxPooling2D()(out)
        out = Conv2D(data_format='channels_last',
                     filters=256,
                     kernel_size=(3, 3),
                     strides=(2, 2),
                     activation='relu')(out)
        out = MaxPooling2D()(out)
        out = Flatten()(out)
        value = Dense(units=64, kernel_initializer='zeros',
                      activation='relu')(out)
        value = Dense(units=1, kernel_initializer='zeros',
                      activation='relu')(value)
        advantage = Dense(units=64,
                          kernel_initializer='zeros',
                          activation='relu')(out)
        advantage = Dense(units=self.output_dim,
                          kernel_initializer='zeros',
                          activation='softmax')(advantage)
        advantage_mean = Lambda(lambda x: K.mean(x, axis=1))(advantage)
        advantage = Subtract()([advantage, advantage_mean])
        out = Add()([value, advantage])

        model = Model(inputs=input, outputs=out)
        model.compile(optimizer="adam", loss="mse")
        return model
Example #13
def custom_model(
    input_dim=5383, embedding_dim=64, input_length=10, fc_unit=64
):
    inputs = tf.keras.Input(shape=(input_length,))
    embed_layer = Embedding(
        output_dim=embedding_dim, mask_zero=True, input_length=input_length
    )
    embeddings = embed_layer(inputs)
    embeddings = ApplyMask()(embeddings)

    emb_sum = K.sum(embeddings, axis=1)
    emb_sum_square = K.square(emb_sum)
    emb_square = K.square(embeddings)
    emb_square_sum = K.sum(emb_square, axis=1)
    second_order = K.sum(
        0.5 * Subtract()([emb_sum_square, emb_square_sum]), axis=1
    )

    id_bias = Embedding(output_dim=1, mask_zero=True)(inputs)
    id_bias = ApplyMask()(id_bias)
    first_order = K.sum(id_bias, axis=(1, 2))
    fm_output = tf.keras.layers.Add()([first_order, second_order])

    nn_input = Flatten()(embeddings)
    nn_h = Dense(fc_unit)(nn_input)
    deep_output = Dense(1)(nn_h)
    deep_output = tf.reshape(deep_output, shape=(-1,))
    logits = tf.keras.layers.Add()([fm_output, deep_output])
    probs = tf.reshape(tf.sigmoid(logits), shape=(-1, 1))

    m = tf.keras.Model(
        inputs=inputs, outputs={"logits": logits, "probs": probs}
    )
    return m
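The second_order term above relies on the standard factorization-machine identity: for field embeddings e_i, summing 0.5 * ((sum_i e_i)^2 - sum_i e_i^2) over the embedding dimension equals the sum of all pairwise dot products <e_i, e_j> with i < j, computed in linear rather than quadratic time. A small numeric check (illustrative only):

import numpy as np

e = np.random.rand(5, 8)  # 5 fields, embedding dimension 8
pairwise = sum(e[i] @ e[j] for i in range(5) for j in range(i + 1, 5))
identity = 0.5 * ((e.sum(axis=0) ** 2 - (e ** 2).sum(axis=0)).sum())
assert np.isclose(pairwise, identity)
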
Example #14
def baseline_model():
    input_1 = Input(shape=(None, None, 3))
    input_2 = Input(shape=(None, None, 3))

    base_model = MobileNetV2(weights="imagenet", include_top=False)

    x1 = base_model(input_1)
    x2 = base_model(input_2)

    x1 = Concatenate(axis=-1)([GlobalMaxPool2D()(x1), GlobalAvgPool2D()(x1)])
    x2 = Concatenate(axis=-1)([GlobalMaxPool2D()(x2), GlobalAvgPool2D()(x2)])

    x3 = Subtract()([x1, x2])
    x3 = Multiply()([x3, x3])

    x = Multiply()([x1, x2])

    x = Concatenate(axis=-1)([x, x3])

    x = Dense(100, activation="relu")(x)
    x = Dropout(0.01)(x)
    out = Dense(1, activation="sigmoid")(x)

    model = Model([input_1, input_2], out)

    model.compile(loss="binary_crossentropy", metrics=[acc], optimizer=Adam(0.00001))

    model.summary()

    return model
Example #15
def baseline_model(seq_dim=3):
    input_1 = Input(shape=(None, 3))
    input_2 = Input(shape=(None, seq_dim))

    base_model = encoder(seq_dim=3)

    x1 = base_model(input_1)
    x2 = base_model(input_2)

    x1 = Concatenate(axis=-1)([GlobalMaxPool1D()(x1), GlobalAvgPool1D()(x1)])
    x2 = Concatenate(axis=-1)([GlobalMaxPool1D()(x2), GlobalAvgPool1D()(x2)])

    x3 = Subtract()([x1, x2])
    x3 = Multiply()([x3, x3])

    x = Multiply()([x1, x2])

    x = Concatenate(axis=-1)([x, x3])
    x = Dropout(0.1)(x)
    x = Dense(100, activation="relu")(x)
    x = Dropout(0.1)(x)
    out = Dense(1, activation="sigmoid")(x)

    model = Model([input_1, input_2], out)

    model.compile(loss="binary_crossentropy", metrics=[acc], optimizer=Adam(0.0001))

    model.summary()

    return model
Example #16
def prepModel():

    # Load pretrained model - except the last softmax layer
    base_model = ResNet50()
    print ("base_model.layers[0].input_shape: {}".format(base_model.layers[0].input_shape))

    input_shape = (224, 224, 3)
    left_input = Input(input_shape)
    right_input = Input(input_shape)

    encoded_l = base_model(left_input)
    encoded_r = base_model(right_input)

    subtracted = Subtract()([encoded_l, encoded_r])
    prediction = Dense(1, activation='sigmoid')(subtracted)

    # this is the model we will train
    model = Model(inputs=[left_input, right_input], outputs=prediction)

    # first: train only the top layers
    for layer in base_model.layers:
        layer.trainable = False

    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(lr=0.001), #'adam', # default LR: 0.001
                  metrics=['accuracy'])

    return model
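Note that ResNet50() with default arguments keeps the final 1000-way softmax, so the comment "except the last softmax layer" only holds if the classification head is removed. A variant that actually drops it (an assumption about the intended behaviour, not the original code):

    base_model = ResNet50(include_top=False, pooling='avg')  # 2048-dim embedding per image
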
Example #17
def build_model_normalized_ASI(n_input,n_output,rescale_para,base_model = build_model_dense, \
        layers = [800,200,200],ActFun = 'relu',ASI = True):
    [mean, std, mean2, std2] = rescale_para
    #    def linear_transform(x):
    #        v1 = K.constant(mean,dtype = tf.float32)
    #        v2 = K.constant(std, dtype = tf.float32)
    #        return (x-v1)/v2

    data_input = Input(shape=(n_input, ))
    data_input_post = Lambda(lambda x: (x - mean) / std)(data_input)
    #    data_input_post = Lambda(linear_transform)(data_input)
    m1 = base_model(n_input=n_input,
                    n_output=n_output,
                    layers=layers,
                    ActFun=ActFun)
    m2 = base_model(n_input=n_input,
                    n_output=n_output,
                    layers=layers,
                    ActFun=ActFun)
    if ASI == True:
        m2.set_weights(m1.get_weights())
    out_a = m1(data_input_post)
    out_b = m2(data_input_post)
    data_out_pre = Subtract()([out_a, out_b])
    data_out_pre = Lambda(lambda x: 0.70710678 * x)(data_out_pre)
    data_out = Lambda(lambda x: x)(data_out_pre)
    #    data_out = Lambda(lambda x: x*std2+mean2)(data_out_pre)
    return Model(data_input, data_out)
Example #18
def buildDQN(n_actions=4, learning_rate=0.00001, frame_height=84, frame_width=84, sequence_length=4):
    input_tensor=Input(shape=(frame_height, frame_width, sequence_length), dtype=tf.float32) 

    input_scaled = Lambda(lambda input_unscaled: input_unscaled / 255)(input_tensor)  # normalize by 255

    conv1 = Conv2D(32, (8, 8), strides=4, kernel_initializer=VarianceScaling(scale=2.), 
    activation='relu', use_bias=False,name='conv1')(input_scaled)
    conv2 = Conv2D(64, (4, 4), strides=2, kernel_initializer=VarianceScaling(scale=2.), 
    activation='relu', use_bias=False,name='conv2')(conv1)
    conv3 = Conv2D(64, (3, 3), strides=1, kernel_initializer=VarianceScaling(scale=2.), 
    activation='relu', use_bias=False,name='conv3')(conv2)
    conv4 = Conv2D(1024, (7, 7), strides=1, kernel_initializer=VarianceScaling(scale=2.), 
    activation='relu', use_bias=False,name='conv4')(conv3)

    value_stream, advantage_stream = Lambda(lambda w: tf.split(w, 2, 3))(conv4)

    value_stream = Flatten(name="value_stream_flatten")(value_stream)
    value = Dense(1, kernel_initializer=VarianceScaling(scale=2.),name="value_stream_dense")(value_stream)

    advantage_stream = Flatten(name="advantage_stream_flatten")(advantage_stream)
    advantage = Dense(n_actions, kernel_initializer=VarianceScaling(scale=2.),name="advantage_stream_dense")(advantage_stream)

    reduce_mean = Lambda(lambda w: tf.reduce_mean(w, axis=1, keepdims=True),name="reduce_mean")

    q_values = Add(name="q_values")([value, Subtract()([advantage, reduce_mean(advantage)])])

    model = Model(input_tensor, q_values)
    model.compile(Adam(learning_rate), loss=tf.keras.losses.Huber())

    return model 
Example #19
def create_shunt_trainings_model(model, model_shunt, shunt_locations):

    shunt_input = model.layers[shunt_locations[0] - 1].output
    output_original_model = model.layers[shunt_locations[1]].output
    output_original_model = Flatten()(output_original_model)
    #output_original_model = K.l2_normalize(output_original_model,axis=1)

    #print(model.layers[shunt_locations[0]-1].name)
    #print(model.layers[shunt_locations[1]].name)

    x = model_shunt(shunt_input)

    x = Flatten()(x)
    #x = K.l2_normalize(x,axis=1)
    x = Subtract()([x, output_original_model])
    #x = Multiply()([x, x])
    #x = keras.backend.sum(x, axis=1)
    #x = keras.backend.sqrt(x)
    #x = Lambda(lambda x: x * 1/(model_shunt.output_shape[1]*model_shunt.output_shape[2]))(x)

    model_training = keras.models.Model(inputs=model.input,
                                        outputs=[x],
                                        name='shunt_training')
    for layer in model_training.layers:
        layer.trainable = False
    model_training.get_layer(name=model_shunt.name).trainable = True

    return model_training
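Because the training model's only output is the raw difference between the shunt's features and the original block's features, it is typically fit against zero targets. A usage sketch (an assumption, not from the original source; model, model_shunt, shunt_locations, and train_images are placeholders):

import numpy as np

model_training = create_shunt_trainings_model(model, model_shunt, shunt_locations)
model_training.compile(optimizer='adam', loss='mse')

flat_len = int(np.prod(model_training.output_shape[1:]))
zero_targets = np.zeros((len(train_images), flat_len))
model_training.fit(train_images, zero_targets, epochs=10)
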
Example #20
def build_q_network(n_actions, learning_rate=0.00001, input_shape=(84, 84), history_length=4):

    model_input = Input(shape=(input_shape[0], input_shape[1], history_length))
    x = Lambda(lambda layer: layer / 255)(model_input)  # normalize by 255

    x = Conv2D(32, (8, 8), strides=4, kernel_initializer=VarianceScaling(scale=2.), activation='relu', use_bias=False)(x)
    x = Conv2D(64, (4, 4), strides=2, kernel_initializer=VarianceScaling(scale=2.), activation='relu', use_bias=False)(x)
    x = Conv2D(64, (3, 3), strides=1, kernel_initializer=VarianceScaling(scale=2.), activation='relu', use_bias=False)(x)
    x = Conv2D(1024, (7, 7), strides=1, kernel_initializer=VarianceScaling(scale=2.), activation='relu', use_bias=False)(x)

    val_stream, adv_stream = Lambda(lambda w: tf.split(w, 2, 3))(x) 

    val_stream = Flatten()(val_stream)
    val = Dense(1, kernel_initializer=VarianceScaling(scale=2.))(val_stream)

    adv_stream = Flatten()(adv_stream)
    adv = Dense(n_actions, kernel_initializer=VarianceScaling(scale=2.))(adv_stream)

    reduce_mean = Lambda(lambda w: tf.reduce_mean(w, axis=1, keepdims=True))  # custom layer for reduce mean

    q_vals = Add()([val, Subtract()([adv, reduce_mean(adv)])])

    model = Model(model_input, q_vals)
    model.compile(Adam(learning_rate), loss=tf.keras.losses.Huber())

    return model
Example #21
    def _build_model(self,
                     input_size,
                     output_size,
                     hidden_size0=200,
                     hidden_size1=10):
        if self.dueling_dqn:
            inputs = Input(shape=(input_size, ))
            x = Dense(hidden_size0, activation='relu')(inputs)
            x = Dense(hidden_size1, activation='relu')(x)

            value = Dense(3, activation='linear')(x)
            a = Dense(3, activation='linear')(x)
            mean = Lambda(lambda x: K.mean(x, axis=1, keepdims=True))(a)
            advantage = Subtract()([a, mean])

            q = Add()([value, advantage])

            model = Model(inputs=inputs, outputs=q)
            model.compile(loss='mse', optimizer=Adam(self.lr))
        else:
            model = Sequential()
            model.add(
                Dense(hidden_size0,
                      input_shape=(input_size, ),
                      activation='relu'))
            model.add(Dense(hidden_size1, activation='relu'))
            model.add(Dense(output_size, activation='linear'))
            model.compile(loss='mse', optimizer=Adam(lr=self.lr))
        return model
Example #22
    def _merger(self, net: Tensor, item: Tensor) -> Tensor:
        """"Combine feature maps"""

        # crop feature maps
        crop_size = int(item.shape[1] - net.shape[1]) / 2
        item_cropped = Cropping2D(int(crop_size))(item)

        # adapt number of filters via 1x1 convolutional to allow merge
        current_filters = int(net.shape[-1])
        item_cropped = Conv2D(current_filters,
                              1,
                              activation=self._activation,
                              padding=self._padding)(item_cropped)

        # Combine feature maps by adding
        if self._merge_type == "add":
            return Add()([item_cropped, net])
        # Combine feature maps by subtracting
        if self._merge_type == "subtract":
            return Subtract()([item_cropped, net])
        # Combine feature maps by multiplication
        if self._merge_type == "multiply":
            return Multiply()([item_cropped, net])

        # Raise ValueError if merge type is unsupported
        raise ValueError(f"unsupported merge type: {self._merge_type}")
Example #23
    def create_model(self):
        # TODO: Add LSTM and maybe dropout
        inp = Input(self.input_shape)
        out = Conv2D(data_format='channels_last',
                     filters=256,
                     kernel_size=(3, 3),
                     strides=(2, 2),
                     activation='relu')(inp)
        out = MaxPooling2D()(out)
        out = Conv2D(data_format='channels_last',
                     filters=256,
                     kernel_size=(3, 3),
                     strides=(2, 2),
                     activation='relu')(out)
        out = MaxPooling2D()(out)
        out = Flatten()(out)
        value = Dense(units=64, kernel_initializer='zeros',
                      activation='relu')(out)
        value = Dense(units=1, kernel_initializer='zeros',
                      activation='relu')(value)
        advantage = Dense(units=64,
                          kernel_initializer='zeros',
                          activation='relu')(out)
        advantage = Dense(units=self.output_shape,
                          kernel_initializer='zeros',
                          activation='softmax')(advantage)
        advantage_mean = Lambda(lambda x: k.mean(x, axis=1))(advantage)
        advantage = Subtract()([advantage, advantage_mean])
        out = Add()([value, advantage])

        model = Model(inputs=inp, outputs=out)
        model.compile(loss="mean_squared_error",
                      optimizer=Adam(lr=self.learning_rate))
        return model
Example #24
    def build_model(self, state_size, neurons, action_size):
        # Leading LSTM layer
        state_input = Input(shape=state_size)
        lstm1 = LSTM(neurons, activation='sigmoid',
                     return_sequences=False)(state_input)

        # Fully connected layers
        d1 = Dense(neurons, activation='relu')(lstm1)
        d2 = Dense(neurons, activation='relu')(d1)

        # Dueling streams (integer division so Dense receives an int unit count)
        d3_a = Dense(neurons // 2, activation='relu')(d2)
        d3_v = Dense(neurons // 2, activation='relu')(d2)
        a = Dense(action_size, activation='linear')(d3_a)
        value = Dense(1, activation='linear')(d3_v)
        a_mean = Lambda(lambda x: K.mean(x, axis=1, keepdims=True))(a)
        advantage = Subtract()([a, a_mean])
        q = Add()([value, advantage])

        # noisy
        noise = NoisyDense(action_size, bias=True, training=True)
        final = noise(q)

        # Final compile
        model = Model(inputs=state_input, outputs=final)
        model.compile(loss='mse', optimizer=Adam(lr=0.001))

        return model
Example #25
def create_comparison_model(input_shape):
    num_features_per_fighter = input_shape[0] // 2

    model_ = tf.keras.models.Sequential()
    model_.add(tf.keras.Input(shape=(num_features_per_fighter,)))  # shape must be a tuple
    model_.add(tf.keras.layers.Dense(32, activation='relu'))
    model_.add(tf.keras.layers.Dense(32, activation='relu'))
    model_.add(tf.keras.layers.Dropout(0.5))

    model_.add(tf.keras.layers.Dense(1, activation='relu'))

    # Run cnn model on each frame
    input_tensor = Input(shape=input_shape)
    fighter0_state = Lambda(lambda x: x[:, :num_features_per_fighter],
                            name='fighter0_state')(input_tensor)
    fighter1_state = Lambda(lambda x: x[:, num_features_per_fighter:],
                            name='fighter1_state')(input_tensor)

    fighter0_score = model_(fighter0_state)
    fighter1_score = model_(fighter1_state)
    fighter0_score = Lambda(lambda x: x, name='fighter0_score')(fighter0_score)
    fighter1_score = Lambda(lambda x: x, name='fighter1_score')(fighter1_score)

    difference_score = Subtract(name='subtracter')(
        [fighter1_score, fighter0_score])
    prediction = Activation('sigmoid')(difference_score)
    return Model(inputs=input_tensor, outputs=prediction)
Example #26
def factorized_bilinear_pooling_new(F1, F2, init_filters, new_filters, name=""):

  F1_expand = Conv2D(filters=new_filters, kernel_size=1, padding='same', strides=1, name=name + "Conv1")(F1)
  F1_expand = ReLU(name=name + "Relu1")(F1_expand)

  F2_expand = Conv2D(filters=new_filters, kernel_size=1, padding='same', strides=1, name=name + "Conv2")(F2)
  F2_expand = ReLU(name=name + "Relu2")(F2_expand)

  F_aux = Add(name=name + "Add1")([F1_expand, F2_expand])

  inter = Multiply(name=name + "Mul1")([F1_expand, F2_expand])
  inter = Dropout(rate=0.1, name=name + "Dropout1")(inter)

  F = Conv2D(filters=init_filters, kernel_size=1, padding='same', strides=1, name=name + "Conv3")(inter)
  F = ReLU(name=name + "Relu3")(F)

  out = Concatenate(name=name + "Concat")([F_aux, F])
  out = Conv2D(filters=init_filters, kernel_size=1, padding='same', strides=1, name=name + "Conv4")(out)
  out = ReLU(name=name + "Relu4")(out)

  power_normalize = Subtract()([
      Lambda(tf.keras.backend.sqrt)(ReLU(name=name + "Relu5")(out)),
      Lambda(tf.keras.backend.sqrt)(ReLU(name=name + "Relu6")(-out))
  ])
  # power_normalize = tf.sqrt(tf.nn.relu(out)) - tf.sqrt(tf.nn.relu(-out))
  l2_normalize = Lambda(tf.keras.backend.l2_normalize, arguments={'axis':-1})(power_normalize)

  return l2_normalize
Example #27
def build_q_network(n_actions,
                    learning_rate=0.00001,
                    input_shape=(84, 84),
                    history_length=4):
    """Builds a dueling DQN as a Keras model
    Arguments:
        n_actions: Number of possible actions the agent can take
        learning_rate: Learning rate
        input_shape: Shape of the preprocessed frame the model sees
        history_length: Number of historical frames the agent can see
    Returns:
        A compiled Keras model
    """
    model_input = Input(shape=(input_shape[0], input_shape[1], history_length))
    x = Lambda(lambda layer: layer / 255)(model_input)  # normalize by 255

    x = Conv2D(32, (8, 8),
               strides=4,
               kernel_initializer=VarianceScaling(scale=2.),
               activation='relu',
               use_bias=False)(x)
    x = Conv2D(64, (4, 4),
               strides=2,
               kernel_initializer=VarianceScaling(scale=2.),
               activation='relu',
               use_bias=False)(x)
    x = Conv2D(64, (3, 3),
               strides=1,
               kernel_initializer=VarianceScaling(scale=2.),
               activation='relu',
               use_bias=False)(x)
    x = Conv2D(1024, (7, 7),
               strides=1,
               kernel_initializer=VarianceScaling(scale=2.),
               activation='relu',
               use_bias=False)(x)

    # Split into value and advantage streams
    val_stream, adv_stream = Lambda(lambda w: tf.split(w, 2, 3))(
        x)  # custom splitting layer

    val_stream = Flatten()(val_stream)
    val = Dense(1, kernel_initializer=VarianceScaling(scale=2.))(val_stream)

    adv_stream = Flatten()(adv_stream)
    adv = Dense(n_actions,
                kernel_initializer=VarianceScaling(scale=2.))(adv_stream)

    # Combine streams into Q-Values
    reduce_mean = Lambda(lambda w: tf.reduce_mean(w, axis=1, keepdims=True)
                         )  # custom layer for reduce mean

    q_vals = Add()([val, Subtract()([adv, reduce_mean(adv)])])

    # Build model
    model = Model(model_input, q_vals)
    model.compile(Adam(learning_rate), loss=tf.keras.losses.Huber())

    return model
Example #28
    def _build_model(self):
        """
        Builds a CNN model that will be used by the agent to predict Q-values.

        Returns
        -------
        A compiled Keras model with Adam Optimizer and Huber loss.
        """
        input = Input(shape=self._STATE_SPACE)
        x = Sequential([
            Conv2D(filters=32,
                   kernel_size=(8, 8),
                   strides=(4, 4),
                   padding='valid',
                   activation='relu',
                   kernel_initializer=VarianceScaling(scale=2.0)),
            Conv2D(filters=64,
                   kernel_size=(4, 4),
                   strides=(2, 2),
                   activation='relu',
                   padding='valid',
                   kernel_initializer=VarianceScaling(scale=2.0)),
            Conv2D(filters=64,
                   kernel_size=(3, 3),
                   strides=(1, 1),
                   activation='relu',
                   padding='valid',
                   kernel_initializer=VarianceScaling(scale=2.0)),
            Conv2D(filters=1024,
                   kernel_size=(7, 7),
                   strides=(1, 1),
                   activation='relu',
                   padding='valid',
                   kernel_initializer=VarianceScaling(scale=2.0)),
        ])(input)

        value_tensor, advantage_tensor = Lambda(
            lambda x: tf.split(x, 2, axis=3))(x)

        value_tensor = Flatten()(value_tensor)
        advantage_tensor = Flatten()(advantage_tensor)

        advantage = Dense(
            self._NUM_ACTIONS,
            kernel_initializer=VarianceScaling(scale=2.0))(advantage_tensor)
        value = Dense(
            1, kernel_initializer=VarianceScaling(scale=2.0))(value_tensor)

        mean_advantage = Lambda(
            lambda x: tf.reduce_mean(x, axis=1, keepdims=True))(advantage)
        normalized_advantage = Subtract()([advantage, mean_advantage])

        output = Add()([value, normalized_advantage])

        model = Model(inputs=input, outputs=output)
        optimizer = Adam(1e-5)
        loss = Huber(delta=1.0)
        model.compile(optimizer=optimizer, loss=loss)
        return model
    def BRModule(self, input_data, BRM_x, scale=4):
        """
        A single Block Residual Module (BRM)
        :param input_data: tf object
        :param BRM_x: index of BRM, x sub-index in the paper
        :param scale: magnifying scale factor
        :return: two tf objects for upper (super resolved) and lower (back-projected) flows
        """

        x1 = Conv2DTranspose(filters=64,
                             kernel_size=scale,
                             strides=scale,
                             padding='valid',
                             activation=PReLU(),
                             kernel_initializer=VarianceScaling(
                                 scale=2.0,
                                 mode="fan_in",
                                 distribution="untruncated_normal"),
                             name='BRM{}_CT'.format(str(BRM_x)))(input_data)
        xup = x1
        for i in range(3):
            xup = Conv2D(filters=64,
                         kernel_size=3,
                         padding='same',
                         activation=PReLU(),
                         kernel_initializer=VarianceScaling(
                             scale=2.0,
                             mode="fan_in",
                             distribution="untruncated_normal"),
                         name='BRM{}_C{}_u'.format(str(BRM_x),
                                                   str(i + 1)))(xup)

        x2 = Conv2D(filters=64,
                    kernel_size=scale,
                    strides=scale,
                    padding='valid',
                    activation=PReLU(),
                    kernel_initializer=VarianceScaling(
                        scale=2.0,
                        mode="fan_in",
                        distribution="untruncated_normal"),
                    name='BRM{}_C{}_b'.format(str(BRM_x), str(1)))(x1)

        x2 = Subtract(name='BRM{}_S_b'.format(str(BRM_x)))([input_data, x2])
        xdn = x2

        for i in range(3):
            x2 = Conv2D(filters=64,
                        kernel_size=3,
                        padding='same',
                        activation=PReLU(),
                        kernel_initializer=VarianceScaling(
                            scale=2.0,
                            mode="fan_in",
                            distribution="untruncated_normal"),
                        name='BRM{}_C{}_b'.format(str(BRM_x), str(i + 2)))(x2)

        xdn = Add(name='BRM{}_A_b'.format(str(BRM_x)))([xdn, x2])
        return xup, xdn  # xup: SR flow in the upper line; xdn: residual flow in the bottom line
    def call(self, x):
        assert isinstance(x, list)
        advantage_values, values = x

        advantage_average = K.mean(advantage_values, axis=1, keepdims=True)
        advantage = Subtract()([advantage_values, advantage_average])
        action_values = Add()([advantage, values])
        return action_values
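The call method above presumably belongs to a custom dueling-aggregation layer. A self-contained sketch of such a layer (the class name and wrapper are assumptions; only the call body comes from the snippet):

from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer, Subtract, Add

class DuelingCombine(Layer):
    """Combines the two streams into Q-values: Q = V + (A - mean(A))."""

    def call(self, x):
        assert isinstance(x, list)
        advantage_values, values = x
        advantage_average = K.mean(advantage_values, axis=1, keepdims=True)
        advantage = Subtract()([advantage_values, advantage_average])
        return Add()([advantage, values])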