Example #1
def GMF(num_users, num_items, latent_dim):
    # input
    user_input = Input(shape=(1,), dtype='int32', name='user_input_op')
    item_input = Input(shape=(1,), dtype='int32', name='item_input_op')

    # embedding
    user_embedding = Embedding(input_dim=num_users, output_dim=latent_dim,
                               embeddings_initializer=initializers.random_normal(),
                               input_length=1, name='user_embedding_op')(user_input)
    item_embedding = Embedding(input_dim=num_items, output_dim=latent_dim,
                               embeddings_initializer=initializers.random_normal(),
                               input_length=1, name='item_embedding_op')(item_input)

    # flatten
    user_flatten = Flatten()(user_embedding)
    item_flatten = Flatten()(item_embedding)

    # multiply
    predict_vector = multiply([user_flatten, item_flatten])

    # prediction
    prediction = Dense(1, activation='sigmoid', name='prediction_op')(predict_vector)

    model_ = Model(inputs=[user_input, item_input], outputs=prediction)

    return model_
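
A minimal usage sketch for the GMF factory above. The user/item counts, latent dimension, and compile settings are placeholder assumptions, not part of the original example; the usual Keras imports (Input, Embedding, Flatten, multiply, Dense, Model, initializers) are assumed to be in scope.

# Hedged usage sketch; hyperparameters and optimizer are assumptions
from keras.optimizers import Adam

model = GMF(num_users=6040, num_items=3706, latent_dim=8)
model.compile(optimizer=Adam(lr=0.001), loss='binary_crossentropy', metrics=['accuracy'])
model.summary()
# model.fit([user_ids, item_ids], labels, batch_size=256, epochs=10)  # user_ids/item_ids: int arrays, labels: 0/1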
Example #2
def HourglassNetwork(inputs,
                     num_stacks,
                     num_classes,
                     cnv_dim=256,
                     dims=[256, 384, 384, 384, 512]):
    inter = pre(inputs, cnv_dim)
    outputs = []
    for i in range(num_stacks):
        prev_inter = inter
        _heads, inter = hourglass_module(num_classes, inter, cnv_dim, i, dims)
        outputs.append(_heads)
        if i < num_stacks - 1:
            inter_ = Conv2D(cnv_dim,
                            1,
                            use_bias=False,
                            kernel_initializer=random_normal(stddev=0.02),
                            name='inter_.%d.0' % i)(prev_inter)
            inter_ = BatchNormalization(epsilon=1e-5,
                                        name='inter_.%d.1' % i)(inter_)

            cnv_ = Conv2D(cnv_dim,
                          1,
                          use_bias=False,
                          kernel_initializer=random_normal(stddev=0.02),
                          name='cnv_.%d.0' % i)(inter)
            cnv_ = BatchNormalization(epsilon=1e-5, name='cnv_.%d.1' % i)(cnv_)

            inter = Add(name='inters.%d.inters.add' % i)([inter_, cnv_])
            inter = Activation('relu', name='inters.%d.inters.relu' % i)(inter)
            inter = residual(inter, cnv_dim, 'inters.%d' % i)
    return outputs
Example #3
def create_model():
    # create model
    model = Sequential()
    model.add(Conv2D(
        32, (5, 5),
        input_shape=(32, 32,
                     3)))  # by default it initializes the weights to Xavier
    model.add(MaxPooling2D((2, 2)))
    model.add(Dropout(0.2))
    model.add(Activation('relu'))
    model.add(Conv2D(
        100, (1, 1)))  # by default it initializes the weights to Xavier
    model.add(Activation('relu'))
    model.add(Flatten())
    model.add(
        Dense(
            1024,
            kernel_initializer=initializers.random_normal(stddev=0.01)))  # 128
    model.add(Activation('relu'))
    model.add(
        Dense(
            256,
            kernel_initializer=initializers.random_normal(stddev=0.01)))  #128
    model.add(Activation('relu'))
    model.add(Dense(43))
    model.add(Activation('softmax'))

    # Compile model
    model.compile('adam', 'categorical_crossentropy', ['accuracy'])
    return model
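
A brief training sketch for create_model() above; the 32x32x3 input and 43-way softmax suggest an image classification task, but the dataset, batch size, and epoch count below are illustrative assumptions only.

# Hedged usage sketch with assumed data arrays
from keras.utils import to_categorical

model = create_model()
# X_train: float array of shape (N, 32, 32, 3); y_train: integer labels in [0, 42]
# model.fit(X_train, to_categorical(y_train, 43), batch_size=64, epochs=10, validation_split=0.1)
model.summary()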
Example #4
def build_n2n2_AI(LO_weightfile='./n2n2/n2n2_LO.hdf5',
                  K_weightsfile='./n2n2/n2n2_K.hdf5',
                  LO=0,
                  NLO=1):
    n2n2_LO = Sequential()
    for i in range(0, 8):
        n2n2_LO.add(
            Dense(100,
                  kernel_initializer=initializers.random_normal(mean=0.0,
                                                                stddev=0.1),
                  input_dim=7))
        n2n2_LO.add(Activation('selu'))
    n2n2_LO.add(Dense(1, kernel_initializer='uniform', activation='linear'))
    n2n2_LO.compile(optimizer=Adam(lr=0.001,
                                   beta_1=0.9,
                                   beta_2=0.999,
                                   epsilon=None,
                                   decay=0.0),
                    loss='mape')
    n2n2_LO.load_weights(LO_weightfile)
    if LO == 1 and NLO == 0:
        return n2n2_LO
    if (LO == 1 and NLO == 1) or (LO == 0 and NLO == 1):
        n2n2_NLO = Sequential()
        for i in range(0, 8):
            n2n2_NLO.add(
                Dense(32,
                      kernel_initializer=initializers.random_normal(
                          mean=0.0, stddev=0.176177),
                      input_dim=6))
            n2n2_NLO.add(Activation('selu'))
        n2n2_NLO.add(
            Dense(1, kernel_initializer='uniform', activation='linear'))
        n2n2_NLO.load_weights(K_weightsfile)
        return n2n2_LO, n2n2_NLO
Example #5
def get_model(num_users, num_items, latent_dim, regs=[0, 0]):
    # Input variables
    user_input = Input(shape=(1,), dtype='int32', name='user_input')
    item_input = Input(shape=(1,), dtype='int32', name='item_input')

    # MF_Embedding_User = Embedding(input_dim = num_users, output_dim = latent_dim, name = 'user_embedding', init = init_normal, W_regularizer = l2(regs[0]), input_length=1)
    MF_Embedding_User = Embedding(input_dim=num_users, output_dim=latent_dim,
                                  embeddings_initializer=initializers.random_normal(),
                                  embeddings_regularizer=l2(regs[0]), input_length=1, name='user_embedding')
    # MF_Embedding_Item = Embedding(input_dim = num_items, output_dim = latent_dim, name = 'item_embedding', init = init_normal, W_regularizer = l2(regs[1]), input_length=1)
    MF_Embedding_Item = Embedding(input_dim=num_items, output_dim=latent_dim,
                                  embeddings_initializer=initializers.random_normal(),
                                  embeddings_regularizer=l2(regs[1]),
                                  input_length=1, name='item_embedding')
    
    # Crucial to flatten an embedding vector!
    user_latent = Flatten()(MF_Embedding_User(user_input))
    item_latent = Flatten()(MF_Embedding_Item(item_input))
    
    # Element-wise product of user and item embeddings 
    # predict_vector = merge([user_latent, item_latent], mode = 'mul')
    predict_vector = multiply([user_latent, item_latent])
    
    # Final prediction layer
    # prediction = Lambda(lambda x: K.sigmoid(K.sum(x)), output_shape=(1,))(predict_vector)
    prediction = Dense(1, activation='sigmoid',
                       kernel_initializer=initializers.lecun_normal(), name='prediction')(predict_vector)
    
    model_ = Model(inputs=[user_input, item_input], outputs=prediction)

    return model_
Example #6
 def define_nn(self, n_neurons, hidden, m, nb_hidden_layer, learning_rate):
     classifier = Sequential()
     if hidden == True:
         # First Hidden Layer
         layer0 = Dense(n_neurons,
                        activation='sigmoid',
                        kernel_initializer=initializers.random_normal(
                            stddev=0.03, seed=98765),
                        input_dim=m)
         classifier.add(layer0)
         nb = 1
         while (nb < nb_hidden_layer):
             layer_nb = Dense(n_neurons,
                              activation='sigmoid',
                              kernel_initializer=initializers.random_normal(
                                  stddev=0.03, seed=98765))
             classifier.add(layer_nb)
             nb += 1
     # Output Layer
     layer1 = Dense(1, activation='sigmoid', kernel_initializer=initializers.random_normal(stddev=0.03, seed=98765), \
                    kernel_regularizer=regularizers.l2(0.5))
     classifier.add(layer1)
     # Compiling the neural network
     sgd = optimizers.SGD(lr=learning_rate, clipvalue=0.5)
     classifier.compile(optimizer=sgd,
                        loss='binary_crossentropy',
                        metrics=['accuracy'])
     return classifier
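
A brief construction sketch for define_nn() above; since it is a method, it must be called on its owning object, and every value below (including the placeholder instance name) is an assumption for illustration.

# Hedged usage sketch; `trainer` stands in for whatever object defines this method
clf = trainer.define_nn(n_neurons=16, hidden=True, m=30, nb_hidden_layer=2, learning_rate=0.01)
clf.summary()
# clf.fit(X_train, y_train, epochs=50, batch_size=32)  # y_train is a binary {0, 1} target vector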
Example #7
def cnn(height, width):
    question_input = Input(shape=(height, width, 1), name='question_input')
    conv1_Q = Conv2D(512, (2, 320),
                     activation='sigmoid',
                     padding='valid',
                     kernel_regularizer=regularizers.l2(0.01),
                     kernel_initializer=initializers.random_normal(
                         mean=0.0, stddev=0.02))(question_input)
    Max1_Q = MaxPooling2D((29, 1), strides=(1, 1), padding='valid')(conv1_Q)
    F1_Q = Flatten()(Max1_Q)
    Drop1_Q = Dropout(0.25)(F1_Q)
    predictQ = Dense(32,
                     activation='relu',
                     kernel_regularizer=regularizers.l2(0.01),
                     kernel_initializer=initializers.random_normal(
                         mean=0.0, stddev=0.02))(Drop1_Q)
    prediction2 = Dropout(0.25)(predictQ)
    predictions = Dense(1, activation='relu')(prediction2)
    model = Model(inputs=[question_input], outputs=predictions)

    model.compile(loss='mean_squared_error',
                  optimizer=Adam(lr=0.0001,
                                 beta_1=0.9,
                                 beta_2=0.999,
                                 epsilon=1e-08,
                                 decay=0.0))
    # model.compile(loss='mean_squared_error',
    #             optimizer='nadam')
    return model
Example #8
def identity_block(input_tensor, kernel_size, filters, stage, block):

    filters1, filters2, filters3 = filters

    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2D(filters1, (1, 1),
               kernel_initializer=random_normal(stddev=0.02),
               name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters2,
               kernel_size,
               padding='same',
               kernel_initializer=random_normal(stddev=0.02),
               name=conv_name_base + '2b')(x)
    x = BatchNormalization(name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters3, (1, 1),
               kernel_initializer=random_normal(stddev=0.02),
               name=conv_name_base + '2c')(x)
    x = BatchNormalization(name=bn_name_base + '2c')(x)

    x = layers.add([x, input_tensor])
    x = Activation('relu')(x)
    return x
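
The identity block above relies on imports from its surrounding module; a small wiring sketch follows, where the imports, the 56x56x256 dummy input, and the filter counts are assumptions (the input channel count must equal filters3 for the residual add to work).

# Assumed imports and illustrative usage; not part of the original example
from keras import layers
from keras.layers import Input, Conv2D, BatchNormalization, Activation
from keras.initializers import random_normal
from keras.models import Model

feature_map = Input(shape=(56, 56, 256))
out = identity_block(feature_map, kernel_size=3, filters=[64, 64, 256], stage=2, block='b')
demo = Model(feature_map, out)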
Example #9
def build_model(input_size):
    input_shape = (input_size[0], input_size[1], 3)
    model = Sequential()
    model.add(
        Conv2D(filters=128,
               kernel_size=(3, 3),
               activation='relu',
               kernel_initializer=initializers.random_normal(mean=0,
                                                             stddev=0.001),
               bias_initializer='zeros',
               padding='same',
               use_bias=True,
               input_shape=input_shape))
    model.add(
        Conv2D(filters=64,
               kernel_size=(5, 5),
               activation='relu',
               kernel_initializer=initializers.random_normal(mean=0,
                                                             stddev=0.001),
               bias_initializer='zeros',
               padding='same',
               use_bias=True))
    model.add(
        Conv2D(filters=3,
               kernel_size=(3, 3),
               activation='linear',
               kernel_initializer=initializers.random_normal(mean=0,
                                                             stddev=0.001),
               bias_initializer='zeros',
               padding='same',
               use_bias=True))
    optimizer = Adam(lr=0.001)
    # optimizer = SGD(lr=0.01, momentum=0.9)
    model.compile(optimizer=optimizer, loss='mse')
    return model
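
A short usage sketch for build_model() above; the 128x128 input size and the image-to-image training pairs are assumptions for illustration.

# Hedged usage sketch; input_size is (height, width)
model = build_model((128, 128))
model.summary()
# model.fit(degraded_images, clean_images, batch_size=16, epochs=20)  # same-shape image pairs, float values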
Example #10
    def build(self, input_shape):

        input_dimension = input_shape[-1]

        units1 = self.output_dimension * self.number_of_mixtures

        self.mu_kernel = self.add_weight(name="mu_kernel",
                                         shape=(input_dimension, units1),
                                         initializer=initializers.random_normal(),
                                         trainable=True)
        self.mu_bias = self.add_weight(name="mu_bias",
                                       shape=(units1,),
                                       initializer=initializers.zeros(),
                                       trainable=True)

        self.sigma_kernel = self.add_weight(name="sigma_kernel",
                                            shape=(input_dimension, units1),
                                            initializer=initializers.random_normal(),
                                            trainable=True)
        self.sigma_bias = self.add_weight(name="sigma_bias",
                                          shape=(units1,),
                                          initializer=initializers.zeros(),
                                          trainable=True)

        units2 = self.number_of_mixtures

        self.pi_kernel = self.add_weight(name="pi_kernel",
                                         shape=(input_dimension, units2),
                                         initializer=initializers.random_normal(),
                                         trainable=True)
        self.pi_bias = self.add_weight(name="pi_bias",
                                       shape=(units2,),
                                       initializer=initializers.zeros(),
                                       trainable=True)
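
A possible call() counterpart to the build() above is sketched here as an assumption: applying an exponential to the sigma outputs and a softmax to the pi outputs is a common mixture-density convention, but it is not taken from the original example.

# Hedged sketch of a matching call(); would live inside the same layer class
from keras import backend as K

def call(self, inputs):
    mu = K.dot(inputs, self.mu_kernel) + self.mu_bias
    sigma = K.exp(K.dot(inputs, self.sigma_kernel) + self.sigma_bias)  # keep the scales positive
    pi = K.softmax(K.dot(inputs, self.pi_kernel) + self.pi_bias)       # mixture weights sum to 1
    return K.concatenate([mu, sigma, pi], axis=-1)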
Example #11
    def build(self, input_shape, x=None):
        """       Create a trainable weight variable for this layer.
            x must be a tensor object
            input_shape - must have the shape (batch, height, width, channels) according to "channel_last" of Conv2D layer
            reshape_input_shape - BHW * D

            Note that the sigma values are saves as std, not as variance"""

        self.mu = self.add_weight(name='mu',
                                  shape=(self.n_clusters, input_shape[-1]),
                                  initializer=random_normal(mean=0,
                                                            stddev=0.4,
                                                            seed=self.seed),
                                  trainable=True)

        self.std = self.add_weight(name='std',
                                   shape=(self.n_clusters, input_shape[-1]),
                                   initializer=random_normal(mean=0.3,
                                                             stddev=0.05,
                                                             seed=self.seed),
                                   trainable=True,
                                   constraint=MinValue(min_value=self.epsilon))

        self.alpha = self.add_weight(
            name='alpha',
            shape=(self.n_clusters, ),
            initializer=constant(value=(1 / self.n_clusters)),
            trainable=True)
        super(GMM, self).build(input_shape)
Example #12
    def __call__(self):
        inp = layers.Input((None, None, self.n_channels))
        map_size = tf.shape(inp)[1:3]
        shared = layers.Conv2D(
            self.n_channels,
            3,
            activation='relu',
            padding='SAME',
            kernel_initializer=initializers.random_normal(stddev=0.01),
            kernel_regularizer=regularizers.l2(1.0),
            bias_regularizer=regularizers.l2(2.0))(inp)

        logits = layers.Conv2D(
            self.n_anchors * 2,
            1,
            kernel_initializer=initializers.random_normal(stddev=0.01),
            kernel_regularizer=regularizers.l2(1.0),
            bias_regularizer=regularizers.l2(2.0))(shared)
        logits = layers.Reshape((-1, 2))(logits)

        score = layers.Softmax()(logits)

        delta = layers.Conv2D(
            self.n_anchors * 4,
            1,
            kernel_initializer=initializers.random_normal(stddev=0.01),
            kernel_regularizer=regularizers.l2(1.0),
            bias_regularizer=regularizers.l2(2.0))(shared)
        delta = layers.Reshape((-1, 4))(delta)

        model = models.Model(inp, [logits, delta, score])
        return model
Example #13
def get_rpn(base_layers, num_anchors):
    #----------------------------------------------------#
    #   Consolidate features with a 512-channel 3x3 convolution
    #----------------------------------------------------#
    x = Conv2D(512, (3, 3),
               padding='same',
               activation='relu',
               kernel_initializer=random_normal(stddev=0.02),
               name='rpn_conv1')(base_layers)

    #----------------------------------------------------#
    #   Use 1x1 convolutions to adjust the channel count and produce the predictions
    #----------------------------------------------------#
    x_class = Conv2D(num_anchors, (1, 1),
                     activation='sigmoid',
                     kernel_initializer=random_normal(stddev=0.02),
                     name='rpn_out_class')(x)
    x_regr = Conv2D(num_anchors * 4, (1, 1),
                    activation='linear',
                    kernel_initializer=random_normal(stddev=0.02),
                    name='rpn_out_regress')(x)

    x_class = Reshape((-1, 1), name="classification")(x_class)
    x_regr = Reshape((-1, 4), name="regression")(x_regr)
    return [x_class, x_regr]
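
A hedged sketch of wiring get_rpn() to a backbone feature map; the 1024-channel feature map and the anchor count of 9 are assumptions for illustration.

# Assumed usage; base_layers comes from a backbone such as ResNet
from keras.layers import Input
from keras.models import Model

feature_map = Input(shape=(None, None, 1024))
rpn_class, rpn_regress = get_rpn(feature_map, num_anchors=9)
rpn_model = Model(feature_map, [rpn_class, rpn_regress])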
Example #14
def id2(input_tensor, kernel_size, filters, stage, block, weight_decay,
        strides):
    """The identity block is the block that has no conv layer at shortcut.
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of the middle conv layer on the main path
        filters: list of integers, the filters of the 3 conv layers on the main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    # Returns
        Output tensor for the block.
    """
    #print(input_tensor.shape)
    kernel_reg = l2(weight_decay[0]) if weight_decay else None
    bias_reg = l2(weight_decay[1]) if weight_decay else None

    filters1, filters2, filters3 = filters
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2DTranspose(filters1, (4, 4),
                        strides=strides,
                        padding='same',
                        name=conv_name_base + '2a',
                        kernel_regularizer=kernel_reg,
                        bias_regularizer=bias_reg,
                        kernel_initializer=random_normal(stddev=0.01),
                        bias_initializer=constant(0.0))(input_tensor)
    #print(x.shape)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters2,
               kernel_size,
               padding='same',
               name=conv_name_base + '2b',
               kernel_regularizer=kernel_reg,
               bias_regularizer=bias_reg,
               kernel_initializer=random_normal(stddev=0.01),
               bias_initializer=constant(0.0))(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters3,
               kernel_size,
               padding='same',
               name=conv_name_base + '2c',
               kernel_regularizer=kernel_reg,
               bias_regularizer=bias_reg,
               kernel_initializer=random_normal(stddev=0.01),
               bias_initializer=constant(0.0))(x)
    #x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

    #x = layers.add([x, input_tensor])
    #x = Activation('relu')(x)
    return x
Example #15
    def build_model(shape, gru_layers, reg_layers, drop_layers):

        # Embedding Layer
        user_input = Input(shape=(1, ), dtype='int32', name='user_input')

        item_input = Input(shape=(1, ), dtype='int32', name='item_input')

        time_input = Input(shape=(1, ), dtype='int32', name='time_input')

        user_embedding = Flatten()(Embedding(
            input_dim=shape[0],
            output_dim=gru_layers[0],
            embeddings_initializer=initializers.random_normal(),
            embeddings_regularizer=regularizers.l2(reg_layers[0]),
            input_length=1,
            name='gru_user_embedding')(user_input))
        item_embedding = Flatten()(Embedding(
            input_dim=shape[1],
            output_dim=gru_layers[0],
            embeddings_initializer=initializers.random_normal(),
            embeddings_regularizer=regularizers.l2(reg_layers[0]),
            input_length=1,
            name='gru_item_embedding')(item_input))
        time_embedding = Flatten()(Embedding(
            input_dim=shape[2],
            output_dim=gru_layers[1],
            embeddings_initializer=initializers.random_normal(),
            embeddings_regularizer=regularizers.l2(reg_layers[0]),
            input_length=1,
            name='gru_time_embedding')(time_input))

        user_embedding = Dropout(drop_layers[0])(user_embedding)
        item_embedding = Dropout(drop_layers[0])(item_embedding)
        time_embedding = Dropout(drop_layers[0])(time_embedding)

        gru_vector = Concatenate(axis=1)([user_embedding, item_embedding])
        gru_vector = Reshape(target_shape=(int(gru_layers[1]), -1))(gru_vector)

        for index in range(1, len(gru_layers) - 1):
            gru_layer = GRU(units=gru_layers[index],
                            kernel_initializer=initializers.he_normal(),
                            kernel_regularizer=regularizers.l2(reg_layers[index]),
                            activation='tanh',
                            recurrent_activation='hard_sigmoid',
                            dropout=drop_layers[index],
                            return_sequences=(index != (len(gru_layers) - 2)),
                            name='gru_layer_%d' % index)
            gru_vector = gru_layer([gru_vector, time_embedding])

        gru_vector = Dropout(drop_layers[-1])(gru_vector)

        prediction = Dense(units=gru_layers[-1],
                           activation='relu',
                           kernel_initializer=initializers.lecun_normal(),
                           kernel_regularizer=regularizers.l2(reg_layers[-1]),
                           name='gru_prediction')(gru_vector)
        _model = Model(inputs=[user_input, item_input, time_input],
                       outputs=prediction)
        return _model
Example #16
    def residual_block(self, input_layer, num_ch, name):
        x = layers.Conv2D(num_ch, (3,3), padding = 'same', kernel_initializer=initializers.random_normal(stddev=0.01), name = (name + "Conv2D_1"))(input_layer)
        x = layers.Conv2D(num_ch, (3,3), padding = 'same', kernel_initializer=initializers.random_normal(stddev=0.01), name = (name + "Conv2D_2"))(x)
        x = layers.add([x, input_layer])
        x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)

        return (x)
Example #17
    def GenerateModel(self):
        gf_dim = 64
        gan = Sequential()
        gan.add(
            Dense(8192, use_bias=True, bias_initializer='zeros',
                  input_dim=100))
        gan.add(Reshape([4, 4, gf_dim * 8]))
        gan.add(BatchNormalization(epsilon=1e-5, momentum=0.9, scale=True))
        gan.add(Activation('relu'))
        gan.add(
            Conv2DTranspose(
                gf_dim * 4,
                5,
                strides=(2, 2),
                padding='same',
                use_bias=True,
                kernel_initializer=initializers.random_normal(stddev=0.02),
                bias_initializer='zeros')
        )  #see in channel value and std_value for random normal
        gan.add(BatchNormalization(epsilon=1e-5, momentum=0.9, scale=True))
        gan.add(Activation('relu'))
        gan.add(
            Conv2DTranspose(
                gf_dim * 2,
                5,
                strides=(2, 2),
                padding='same',
                use_bias=True,
                kernel_initializer=initializers.random_normal(stddev=0.02),
                bias_initializer='zeros')
        )  #see in channel value and std_value for random normal
        gan.add(BatchNormalization(epsilon=1e-5, momentum=0.9, scale=True))
        gan.add(Activation('relu'))
        gan.add(
            Conv2DTranspose(
                gf_dim * 1,
                5,
                strides=(2, 2),
                padding='same',
                use_bias=True,
                kernel_initializer=initializers.random_normal(stddev=0.02),
                bias_initializer='zeros')
        )  #see in channel value and std_value for random normal
        gan.add(BatchNormalization(epsilon=1e-5, momentum=0.9, scale=True))
        gan.add(Activation('relu'))
        gan.add(
            Conv2DTranspose(
                3,
                5,
                strides=(2, 2),
                padding='same',
                use_bias=True,
                kernel_initializer=initializers.random_normal(stddev=0.02),
                bias_initializer='zeros')
        )  #see in channel value and std_value for random normal
        gan.add(Activation('tanh'))

        self.GAN = gan
Example #18
    def residual_block_first(self,input_layer, num_ch, name):
        x = layers.Conv2D(num_ch, (3,3), padding = 'same', kernel_initializer=initializers.random_normal(stddev=0.01), name = (name + "_Conv2D_1"))(input_layer)
        shortcut = layers.MaxPooling2D((2,2), name = (name + "_maxpooling"))(x)
        x = layers.Conv2D(num_ch, (3,3), padding = 'same',  kernel_initializer=initializers.random_normal(stddev=0.01), name = (name + "_Conv2D_2"))(shortcut)
        x = layers.add([x, shortcut])
        x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)

        return (x)
Example #19
def MImodel(train_data,label_data,val_data1,val_data2):
    model = Sequential()
    model.add(layers.Dense(8,activation='relu', kernel_initializer=initializers.random_normal(seed=1),bias_initializer=initializers.random_normal(seed=2)))
    #model.add(layers.Dense(4,activation='relu'))
    model.add(layers.Dense(1,activation='sigmoid', kernel_initializer=initializers.random_normal(seed=3),bias_initializer=initializers.random_normal(seed=4)))
    model.compile(optimizer='Adam',loss='mse',metrics=['acc'])
    history = model.fit(train_data, label_data,validation_data=(val_data1,val_data2),epochs=50,batch_size=32)
    miloss = history.history['loss']
    return miloss
Example #20
    def clear_memory(self, goal):

        self.learning_done = True  ## Set the done learning flag
        #del self.trainable_model

        del self.memory

        self.memory = PrioritizedReplayBuffer(self.controllerMemCap,
                                              alpha=prioritized_replay_alpha)

        gpu = self.net.gpu

        device = '/cpu' if gpu < 0 else '/gpu:' + str(gpu)

        #del self.net

        gc.collect()

        rmsProp = optimizers.RMSprop(lr=LEARNING_RATE,
                                     rho=0.95,
                                     epsilon=1e-08,
                                     decay=0.0)

        with tf.device(device):
            self.simple_net = Sequential()
            self.simple_net.add(
                Conv2D(32, (8, 8),
                       strides=4,
                       activation='relu',
                       padding='valid',
                       input_shape=(84, 84, 4)))
            self.simple_net.add(
                Conv2D(64, (4, 4),
                       strides=2,
                       activation='relu',
                       padding='valid'))
            self.simple_net.add(
                Conv2D(64, (3, 3),
                       strides=1,
                       activation='relu',
                       padding='valid'))
            self.simple_net.add(Flatten())
            self.simple_net.add(
                Dense(HIDDEN_NODES,
                      activation='relu',
                      kernel_initializer=initializers.random_normal(
                          stddev=0.01, seed=SEED)))
            self.simple_net.add(
                Dense(nb_Action,
                      activation='linear',
                      kernel_initializer=initializers.random_normal(
                          stddev=0.01, seed=SEED)))
            self.simple_net.compile(loss='mse', optimizer=rmsProp)
            self.simple_net.load_weights(recordFolder + '/policy_subgoal_' +
                                         str(goal) + '.h5')
            self.simple_net.reset_states()
Example #21
 def QNET(self, name = None):
     input_shape = (self.config.VISUAL_DESCRIPTOR + self.config.ACTIONS*self.config.HIS_STEPS,)
     input_tensor = KL.Input(shape = input_shape, name = name + '_Q_input')
     x = KL.Dense(1024, activation = 'relu', kernel_initializer=random_normal(stddev=0.01), name = name + '_Dense_1')(input_tensor)
     x = KL.Dropout(0.2)(x)
     x = KL.Dense(1024, activation = 'relu', kernel_initializer=random_normal(stddev=0.01), name = name + '_Dense_2')(x)
     x = KL.Dropout(0.2)(x)
     x = KL.Dense(self.config.ACTIONS, activation = 'linear', kernel_initializer=random_normal(stddev=0.01), name = name + '_Action')(x)
     
     return KM.Model(inputs = input_tensor, outputs = x, name = name + '_QNET')
Example #22
def fit_model(X, y):
    # design network
    model = Sequential()
    model.add(Dense(10, input_dim=1, kernel_initializer=random_normal(seed=1)))
    model.add(Dense(1, kernel_initializer=random_normal(seed=1)))
    model.compile(loss='mean_squared_error', optimizer='adam')
    # fit network
    model.fit(X, y, epochs=100, batch_size=len(X), verbose=0)
    # forecast
    yhat = model.predict(X, verbose=0)
    print(mean_squared_error(y, yhat[:, 0]))
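
fit_model() above trains a tiny two-layer regressor and prints its in-sample mean squared error; a toy invocation with synthetic data (an assumption, not from the original example) could look like this.

# Toy invocation with synthetic data
import numpy as np

X = np.linspace(0.0, 1.0, 100).reshape(-1, 1)
y = 2.0 * X[:, 0] + 1.0
fit_model(X, y)  # prints the in-sample MSE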
Example #23
def cnn(height_a, height_q, count):
    question_input = Input(shape=(height_q, 1), name='question_input')
    embedding_q = Embedding(input_dim=count,
                            output_dim=128,
                            input_length=height_q)(question_input)
    re_q = Reshape((height_q, 128, 1), input_shape=(height_q, ))(embedding_q)
    conv1_Q = Conv2D(128, (2, 128),
                     activation='sigmoid',
                     padding='valid',
                     kernel_regularizer=regularizers.l2(0.02),
                     kernel_initializer=initializers.random_normal(
                         mean=0.0, stddev=0.05))(re_q)
    Max1_Q = MaxPooling2D((29, 1), strides=(1, 1), padding='valid')(conv1_Q)
    F1_Q = Flatten()(Max1_Q)
    Drop1_Q = Dropout(0.5)(F1_Q)
    predictQ = Dense(64,
                     activation='relu',
                     kernel_regularizer=regularizers.l2(0.02),
                     kernel_initializer=initializers.random_normal(
                         mean=0.0, stddev=0.05))(Drop1_Q)

    # kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.01)
    answer_input = Input(shape=(height_a, 1), name='answer_input')
    embedding_a = Embedding(input_dim=count,
                            output_dim=128,
                            input_length=height_a)(answer_input)
    re_a = Reshape((height_a, 128, 1), input_shape=(height_a, ))(embedding_a)
    conv1_A = Conv2D(128, (2, 128),
                     activation='sigmoid',
                     padding='valid',
                     kernel_regularizer=regularizers.l2(0.02),
                     kernel_initializer=initializers.random_normal(
                         mean=0.0, stddev=0.05))(re_a)
    Max1_A = MaxPooling2D((399, 1), strides=(1, 1), padding='valid')(conv1_A)
    F1_A = Flatten()(Max1_A)
    Drop1_A = Dropout(0.5)(F1_A)
    predictA = Dense(64,
                     activation='relu',
                     kernel_regularizer=regularizers.l2(0.02),
                     kernel_initializer=initializers.random_normal(
                         mean=0.0, stddev=0.05))(Drop1_A)

    predictions = dot([predictA, predictQ], axes=1)  # keras.layers.dot replaces the Keras 1 merge(mode='dot')
    model = Model(inputs=[question_input, answer_input], outputs=predictions)

    model.compile(loss='mean_squared_error',
                  optimizer=Adam(lr=0.0001,
                                 beta_1=0.9,
                                 beta_2=0.999,
                                 epsilon=1e-08,
                                 decay=0.0))
    # model.compile(loss='mean_squared_error',
    #             optimizer='nadam')
    return model
Example #24
def train(data, param):
    """
    Train and evaluate the network on the supplied data.
    :param data: the training and test data
    :param param: the network configuration parameters
    :return: the network's performance (MAPE, MAE) on the test data set
    """
    from keras.layers import Dense, Flatten, BatchNormalization, Activation, Dropout
    from keras.layers.convolutional import Conv2D, MaxPooling2D
    from keras.models import Sequential
    from keras import initializers
    from keras import regularizers

    x_train, x_test, y_train, y_test = data
    x_train = x_train.reshape((-1, param.loop_num, param.time_intervals, 1))
    y_train = y_train.reshape(-1, 1)
    x_test = x_test.reshape((-1, param.loop_num, param.time_intervals, 1))
    y_test = y_test.reshape(-1, 1)
    # def mape_error(y_true, y_pred):
    #     return K.mean(K.abs(y_pred - y_true)/y_true, axis=-1)
    # model=load_model('E:/LeNet/LeNet-5_model.h5')
    # convolution 1st layer
    model = Sequential()
    model.add(Conv2D(16, (3, 3), strides=(1, 1), input_shape=(param.loop_num, param.time_intervals, 1),
                     padding='same', activation='relu',
                     kernel_initializer=initializers.random_normal(stddev=0.1)))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    # convolution 2nd layer
    model.add(Conv2D(32, (3, 3), strides=(1, 1), padding='same', activation='relu',
                     kernel_initializer=initializers.random_normal(stddev=0.1)))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(32, activation='relu', kernel_regularizer=regularizers.l2(param.regularization)))
    model.add(Dense(16, activation='relu', kernel_regularizer=regularizers.l2(param.regularization)))
    model.add(Dense(1))
    model.compile(optimizer='adam', loss='mean_absolute_percentage_error',
                  metrics=['mean_absolute_percentage_error', 'mean_absolute_error'])
    model.summary()
    history = LossHistory()
    early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=param.early_stop_epochs, mode='min')
    reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_loss',  # monitor the validation loss
                                                  factor=0.5,  # halve the learning rate when triggered
                                                  patience=param.reduce_lr_epochs)
    model.fit(x_train, y_train, batch_size=param.batch_size, epochs=param.epochs, shuffle=True,
              validation_split=0.2, callbacks=[history, early_stop, reduce_lr], verbose=0)
    loss, mape, mae = model.evaluate(x_test, y_test, verbose=0)
    # model.save(os.path.join(param.file_path, 'model\\',
    #                         'model_ST={}_{}_mape={mape:.3f}_mae={mae:.3f}_time_lag{time}.h5'.format(
    #                             param.loop_num, param.time_intervals, mape=mape, mae=mae,
    #                             time=((1+param.predict_intervals)*5))))
    # history.loss_plot('epoch', param)
    return [mape, mae]
Example #25
def Attmodel(train_data,label_data,val_data1,val_data2):
    model = Sequential()
    model.add(You.AttentionLayer_2D(supmask=False))
    model.add(layers.Dense(8,activation='relu', kernel_initializer=initializers.random_normal(seed=1),bias_initializer=initializers.random_normal(seed=2)))
    #model.add(layers.Dense(4,activation='relu'))
    model.add(layers.Dense(1,activation='sigmoid', kernel_initializer=initializers.random_normal(seed=3),bias_initializer=initializers.random_normal(seed=4)))
    model.compile(optimizer='Adam',loss='mse',metrics=['acc'])
    history = model.fit(train_data, label_data,validation_data=(val_data1,val_data2),epochs=100,batch_size=32)
    adloss = history.history['loss']
    advalacc=history.history['val_acc']
    model.summary()
    return adloss, advalacc
Example #26
def centernet_head(x, num_classes):
    x = Dropout(rate=0.5)(x)
    #-------------------------------#
    #   Decoder
    #-------------------------------#
    num_filters = 256
    # 16, 16, 2048  ->  32, 32, 256 -> 64, 64, 128 -> 128, 128, 64
    for i in range(3):
        # Upsample
        x = Conv2DTranspose(num_filters // pow(2, i), (4, 4),
                            strides=2,
                            use_bias=False,
                            padding='same',
                            kernel_initializer='he_normal',
                            kernel_regularizer=l2(5e-4))(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
    # This finally yields a 128,128,64 feature map
    # hm header
    y1 = Conv2D(64,
                3,
                padding='same',
                use_bias=False,
                kernel_initializer=random_normal(stddev=0.02))(x)
    y1 = BatchNormalization()(y1)
    y1 = Activation('relu')(y1)
    y1 = Conv2D(num_classes,
                1,
                kernel_initializer=constant(0),
                bias_initializer=constant(-2.19),
                activation='sigmoid')(y1)

    # wh header
    y2 = Conv2D(64,
                3,
                padding='same',
                use_bias=False,
                kernel_initializer=random_normal(stddev=0.02))(x)
    y2 = BatchNormalization()(y2)
    y2 = Activation('relu')(y2)
    y2 = Conv2D(2, 1, kernel_initializer=random_normal(stddev=0.02))(y2)

    # reg header
    y3 = Conv2D(64,
                3,
                padding='same',
                use_bias=False,
                kernel_initializer=random_normal(stddev=0.02))(x)
    y3 = BatchNormalization()(y3)
    y3 = Activation('relu')(y3)
    y3 = Conv2D(2, 1, kernel_initializer=random_normal(stddev=0.02))(y3)
    return y1, y2, y3
Example #27
def create_model():
    classifier = Sequential()

    # Adding a first convolutional layer
    classifier.add(
        Conv2D(16, (5, 5),
               input_shape=(80, 80, 3),
               activation='relu',
               kernel_initializer=initializers.random_normal(stddev=0.04,
                                                             mean=0.00),
               bias_initializer=initializers.Constant(value=0.2)))
    classifier.add(MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(
        Conv2D(32, (5, 5),
               activation='relu',
               kernel_initializer=initializers.random_normal(stddev=0.04,
                                                             mean=0.00),
               bias_initializer=initializers.Constant(value=0.2)))
    classifier.add(MaxPooling2D(pool_size=(2, 2)))

    # Adding a third convolutional layer
    classifier.add(
        Conv2D(48, (4, 4),
               activation='relu',
               kernel_initializer=initializers.random_normal(stddev=0.04,
                                                             mean=0.00),
               bias_initializer=initializers.Constant(value=0.2)))
    classifier.add(MaxPooling2D(pool_size=(2, 2)))

    # Flattening
    classifier.add(Flatten())

    #Full connection
    classifier.add(
        Dense(512,
              activation='relu',
              kernel_initializer=initializers.random_normal(stddev=0.02,
                                                            mean=0.00),
              bias_initializer=initializers.Constant(value=0.1)))

    # output layer
    classifier.add(
        Dense(11,
              activation='softmax',
              kernel_initializer=initializers.random_normal(stddev=0.02,
                                                            mean=0.00),
              bias_initializer=initializers.Constant(value=0.1)))

    return classifier
Example #28
def cnn(height_a, height_q, width_a, width_q, extra_len):
    question_input = Input(shape=(height_q, width_q, 1), name='question_input')
    conv1_Q = Conv2D(512, (2, 128),
                     activation='sigmoid',
                     padding='valid',
                     kernel_regularizer=regularizers.l2(0.01),
                     kernel_initializer=initializers.random_normal(
                         mean=0.0, stddev=0.01))(question_input)
    Max1_Q = MaxPooling2D((29, 1), strides=(1, 1), padding='valid')(conv1_Q)
    F1_Q = Flatten()(Max1_Q)
    Drop1_Q = Dropout(0.5)(F1_Q)
    predictQ = Dense(64,
                     activation='relu',
                     kernel_regularizer=regularizers.l2(0.01),
                     kernel_initializer=initializers.random_normal(
                         mean=0.0, stddev=0.01))(Drop1_Q)

    # kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.01)
    answer_input = Input(shape=(height_a, width_a, 1), name='answer_input')
    conv1_A = Conv2D(512, (2, 128),
                     activation='sigmoid',
                     padding='valid',
                     kernel_regularizer=regularizers.l2(0.01),
                     kernel_initializer=initializers.random_normal(
                         mean=0.0, stddev=0.01))(answer_input)
    Max1_A = MaxPooling2D((319, 1), strides=(1, 1), padding='valid')(conv1_A)
    F1_A = Flatten()(Max1_A)
    Drop1_A = Dropout(0.5)(F1_A)
    predictA = Dense(64,
                     activation='relu',
                     kernel_regularizer=regularizers.l2(0.01),
                     kernel_initializer=initializers.random_normal(
                         mean=0.0, stddev=0.01))(Drop1_A)

    extra_input = Input(shape=(extra_len, ), name='extra_input')
    predictQ1 = concatenate([predictQ, extra_input], axis=1)
    predictA1 = concatenate([predictA, extra_input], axis=1)
    predictions = dot([predictA1, predictQ1], axes=1)  # keras.layers.dot replaces the Keras 1 merge(mode='dot')
    model = Model(inputs=[question_input, answer_input, extra_input],
                  outputs=predictions)

    model.compile(loss='mean_squared_error',
                  optimizer=Adam(lr=0.0001,
                                 beta_1=0.9,
                                 beta_2=0.999,
                                 epsilon=1e-08,
                                 decay=0.0))
    # model.compile(loss='mean_squared_error',
    #             optimizer='nadam')
    return model
Example #29
 def _create_model(self):
     input_x = Input(shape=(self.n_features, ))
     x = Dense(10,
               kernel_initializer=initializers.random_normal(stddev=0.3),
               bias_initializer=initializers.Constant(0.1),
               activation='relu')(input_x)
     predictions = Dense(
         self.n_actions,
         kernel_initializer=initializers.random_normal(stddev=0.3),
         bias_initializer=initializers.Constant(0.1))(x)
     model = Model(inputs=input_x, outputs=predictions)
     model.compile(optimizer=optimizers.RMSprop(lr=self.lr),
                   loss='mean_squared_error')
     return model
Example #30
def get_model(c, n1, n2, f1, f2, f3, fsub):
    input = Input(shape=(fsub, fsub, c))
    x = Conv2D(filters=n1,
               kernel_size=f1,
               activation='relu',
               kernel_initializer=random_normal(stddev=1e-3))(input)
    x = Conv2D(filters=n2,
               kernel_size=f2,
               activation='relu',
               kernel_initializer=random_normal(stddev=1e-3))(x)
    x = Conv2D(filters=c,
               kernel_size=f3,
               activation='relu',
               kernel_initializer=random_normal(stddev=1e-3))(x)
    return Model(inputs=input, outputs=x)
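
A hedged compile-and-train sketch for the SRCNN-style get_model() above; the classic 9-1-5 kernel configuration, the 33x33 sub-image size, and the optimizer settings are assumptions, not part of the original example.

# Assumed SRCNN-style configuration
from keras.optimizers import Adam

srcnn = get_model(c=3, n1=64, n2=32, f1=9, f2=1, f3=5, fsub=33)
srcnn.compile(optimizer=Adam(lr=1e-4), loss='mse')
# srcnn.fit(lr_patches, hr_center_patches, batch_size=128, epochs=50)  # valid padding shrinks the output, so targets are center crops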
    def __init__(self, weights_path, train_conv_layers):
        self.__angle_values = [-1, -0.5, 0, 0.5, 1]

        self.__nb_actions = 5
        self.__gamma = 0.99

        #Define the model
        activation = 'relu'
        pic_input = Input(shape=(59,255,3))
        
        img_stack = Conv2D(16, (3, 3), name='convolution0', padding='same', activation=activation, trainable=train_conv_layers)(pic_input)
        img_stack = MaxPooling2D(pool_size=(2,2))(img_stack)
        img_stack = Conv2D(32, (3, 3), activation=activation, padding='same', name='convolution1', trainable=train_conv_layers)(img_stack)
        img_stack = MaxPooling2D(pool_size=(2, 2))(img_stack)
        img_stack = Conv2D(32, (3, 3), activation=activation, padding='same', name='convolution2', trainable=train_conv_layers)(img_stack)
        img_stack = MaxPooling2D(pool_size=(2, 2))(img_stack)
        img_stack = Flatten()(img_stack)
        img_stack = Dropout(0.2)(img_stack)

        img_stack = Dense(128, name='rl_dense', kernel_initializer=random_normal(stddev=0.01))(img_stack)
        img_stack=Dropout(0.2)(img_stack)
        output = Dense(self.__nb_actions, name='rl_output', kernel_initializer=random_normal(stddev=0.01))(img_stack)

        opt = Adam()
        self.__action_model = Model(inputs=[pic_input], outputs=output)

        self.__action_model.compile(optimizer=opt, loss='mean_squared_error')
        self.__action_model.summary()
        
        # If we are using pretrained weights for the conv layers, load them and verify the first layer.
        if (weights_path is not None and len(weights_path) > 0):
            print('Loading weights from my_model_weights.h5...')
            print('Current working dir is {0}'.format(os.getcwd()))
            self.__action_model.load_weights(weights_path, by_name=True)
            
            print('First layer: ')
            w = np.array(self.__action_model.get_weights()[0])
            print(w)
        else:
            print('Not loading weights')

        # Set up the target model. 
        # This is a trick that will allow the model to converge more rapidly.
        self.__action_context = tf.get_default_graph()
        self.__target_model = clone_model(self.__action_model)

        self.__target_context = tf.get_default_graph()
        self.__model_lock = threading.Lock()
 def __init__(self,
              initial_batch_size,
              units,
              num_blocks,
              num_units_per_block,
              vocab_size,
              keys,
              activation,
              weights=None,
              initializer='normal',
              bias_initializer='zeros',
              use_bias=True,
              **kwargs):
     super(REN, self).__init__(**kwargs)
     self.units = units
     self._num_blocks = num_blocks
     self._num_units_per_block = num_units_per_block
     self._vocab_size = vocab_size
     self._keys = keys
     self._activation = activation
     # self._activation = activation
     # if activation == 'prelu':
     #     self._activation = prelu
     # else:
     #     self._activation = activations.get(activation)
     self._initializer = initializers.random_normal(stddev=0.1)
     # self.ortho_initializer = tf.orthogonal_initializer(gain=1.0)
     self.initial_batch_size = initial_batch_size
     self.bias_initializer = initializers.get(bias_initializer)
     self.supports_masking = True
     self.use_bias = use_bias
     self.initial_weights = weights
def conv(x, nf, ks, name, weight_decay):
    kernel_reg = l2(weight_decay[0]) if weight_decay else None
    bias_reg = l2(weight_decay[1]) if weight_decay else None

    x = Conv2D(nf, (ks, ks), padding='same', name=name,
               kernel_regularizer=kernel_reg,
               bias_regularizer=bias_reg,
               kernel_initializer=random_normal(stddev=0.01),
               bias_initializer=constant(0.0))(x)
    return x