Example #1
    def __init__(self, width, depth, num_classes=20, num_anchors=9, freeze_bn=False, name='class_net', **kwargs):
        self.name = name
        self.width = width
        self.depth = depth
        self.num_classes = num_classes
        self.num_anchors = num_anchors
        options = {
            'kernel_size': 3,
            'strides': 1,
            'padding': 'same',
            'depthwise_initializer': initializers.VarianceScaling(),
            'pointwise_initializer': initializers.VarianceScaling(),
        }

        self.convs = [layers.SeparableConv2D(filters=width, bias_initializer='zeros', name=f'{self.name}/class-{i}',
                                                **options)
                        for i in range(depth)]
        self.head = layers.SeparableConv2D(filters=num_classes * num_anchors,
                                            bias_initializer=PriorProbability(probability=0.01),
                                            name=f'{self.name}/class-predict', **options)

        self.bns = [
            [layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON, name=f'{self.name}/class-{i}-bn-{j}') for j
             in range(3, 8)]
            for i in range(depth)]

        self.relu = layers.Lambda(lambda x: tf.nn.swish(x))
        self.reshape = layers.Reshape((-1, num_classes))
        self.activation = layers.Activation('sigmoid')
Example #2
    def __init__(self, width, depth, num_anchors=9, name='box_net', **kwargs):
        self.name = name
        self.width = width
        self.depth = depth
        self.num_anchors = num_anchors
        options = {
            'kernel_size': 3,
            'strides': 1,
            'padding': 'same',
            'bias_initializer': 'zeros',
            'depthwise_initializer': initializers.VarianceScaling(),
            'pointwise_initializer': initializers.VarianceScaling(),
        }

        self.convs = [
            layers.SeparableConv2D(filters=width,
                                   name=f'{self.name}/box-{i}',
                                   **options) for i in range(depth)
        ]
        self.head = layers.SeparableConv2D(filters=num_anchors * 4,
                                           name=f'{self.name}/box-predict',
                                           **options)

        self.bns = [[
            layers.BatchNormalization(momentum=MOMENTUM,
                                      epsilon=EPSILON,
                                      name=f'{self.name}/box-{i}-bn-{j}')
            for j in range(3, 8)
        ] for i in range(depth)]

        self.relu = layers.Lambda(lambda x: tf.nn.swish(x))
        self.reshape = layers.Reshape((-1, 4))
Example #3
    def get_mf_MLP(self,input_dim1,input_dim2,output_dim,MLP_layers):
        """
        Return the id -> embedding -> merge -> MLP model.
        """
        # Input Layer
        user_input = Input(shape=(1,), dtype='int32')
        item_input = Input(shape=(1,), dtype='int32')

        # Embedding layer
        MF_Embedding_User = Embedding(input_dim=input_dim1, output_dim=output_dim,
                                      embeddings_initializer=initializers.VarianceScaling(scale=0.01,distribution='normal'),
                                      embeddings_regularizer=l2(0.01), input_length=1)
        MF_Embedding_Item = Embedding(input_dim=input_dim2, output_dim=output_dim,
                                      embeddings_initializer=initializers.VarianceScaling(scale=0.01,distribution='normal'),
                                      embeddings_regularizer=l2(0.01), input_length=1)
        # MF part
        mf_user_latent = Flatten()(MF_Embedding_User(user_input))
        mf_item_latent = Flatten()(MF_Embedding_Item(item_input))  # why Flatten?
        mf_vector = concatenate([mf_user_latent, mf_item_latent])

        for idx in range(len(MLP_layers)):   # learn non-linear interactions
            layer = Dense(MLP_layers[idx],  activation='relu')
            mf_vector = layer(mf_vector)
        model = Model(inputs=[user_input,item_input],outputs=mf_vector)
        return model
Example #4
def m6_1():
    model.add(
        Conv2D(32, (3, 3),
               kernel_initializer=initializers.VarianceScaling(scale=0.1),
               input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(
        Conv2D(32, (3, 3),
               kernel_initializer=initializers.VarianceScaling(scale=0.1)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(
        Conv2D(64, (3, 3),
               kernel_initializer=initializers.VarianceScaling(scale=0.1)))
    model.add(Activation('relu'))
    model.add(
        Conv2D(64, (3, 3),
               kernel_initializer=initializers.VarianceScaling(scale=0.1)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(Flatten())
    model.add(
        Dense(256, kernel_initializer=initializers.VarianceScaling(scale=0.1)))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
Example #5
 def build_network_actor(self, input_dim, output_dim, hidden_dims, lr):
     init = initializers.VarianceScaling(scale=1.0, mode='fan_avg', distribution='uniform', seed=None)
     self.model.add(Dense(hidden_dims[0], kernel_initializer=init, activation='relu', input_shape=(input_dim,)))
     self.model.add(Dense(hidden_dims[1], kernel_initializer=init, activation='relu'))
     self.model.add(Dense(hidden_dims[2], kernel_initializer=init, activation='relu'))
     self.model.add(Dense(output_dim, kernel_initializer=init, activation='softmax'))
     self.model.compile(loss='sparse_categorical_crossentropy', optimizer=optimizers.Adam(lr))
     print("Actor...")
     print(self.model.get_config())
     # print(self.model.summary())
     print('lr: ', lr)
     print()
Example #6
    def get_model(self):
        num_layer = len(self.layers)  # Number of layers in the MLP
        # Input variables
        user_input = Input(shape=(1,), dtype='int32', name='user_input')
        item_input = Input(shape=(1,), dtype='int32', name='item_input')

        # Embedding layer
        MF_Embedding_User = Embedding(input_dim=self.num_users, output_dim=self.mf_embedding_dim, name='mf_embedding_user',
                                      embeddings_initializer=initializers.VarianceScaling(scale=0.01,distribution='normal'),
                                      embeddings_regularizer=l2(self.reg_mf), input_length=1) #

        MF_Embedding_Item = Embedding(input_dim=self.num_items, output_dim=self.mf_embedding_dim, name='mf_embedding_item',
                                      embeddings_initializer=initializers.VarianceScaling(scale=0.01,distribution='normal'),
                                      embeddings_regularizer=l2(self.reg_mf), input_length=1) #

        MLP_Embedding_User = Embedding(input_dim=self.num_users, output_dim=int(self.mf_fc_unit_nums[0] / 2), name="mlp_embedding_user",
                                       embeddings_initializer=initializers.VarianceScaling(scale=0.01,distribution='normal'),
                                       embeddings_regularizer=l2(self.reg_layers[0]), input_length=1) #

        MLP_Embedding_Item = Embedding(input_dim=self.num_items, output_dim=int(self.mf_fc_unit_nums[0] / 2), name='mlp_embedding_item',
                                       embeddings_initializer=initializers.VarianceScaling(scale=0.01,distribution='normal'),
                                       embeddings_regularizer=l2(self.reg_layers[0]), input_length=1) #

        # MF part
        mf_user_latent = Flatten()(MF_Embedding_User(user_input))
        mf_item_latent = Flatten()(MF_Embedding_Item(item_input))
        #   mf_vector = merge([mf_user_latent, mf_item_latent], mode='mul')  # element-wise multiply
        mf_vector=Multiply()([mf_user_latent, mf_item_latent])

        # MLP part
        mlp_user_latent = Flatten()(MLP_Embedding_User(user_input))
        mlp_item_latent = Flatten()(MLP_Embedding_Item(item_input))
        #   mlp_vector = merge([mlp_user_latent, mlp_item_latent], mode='concat')
        mlp_vector = Concatenate()([mlp_user_latent, mlp_item_latent])

        for idx in range(1, num_layer):
            layer = Dense(self.mf_fc_unit_nums[idx],  activation='relu', name="layer%d" % idx) # kernel_regularizer=l2(reg_layers[idx]),
            mlp_vector = layer(mlp_vector)

        # Concatenate MF and MLP parts
        # mf_vector = Lambda(lambda x: x * alpha)(mf_vector)
        # mlp_vector = Lambda(lambda x : x * (1-alpha))(mlp_vector)
        #   predict_vector = merge([mf_vector, mlp_vector], mode='concat')
        predict_vector = Concatenate()([mf_vector, mlp_vector])

        # Final prediction layer
        prediction = Dense(1, activation='sigmoid', kernel_initializer='lecun_uniform', name="prediction")(predict_vector)

        model = Model(inputs=[user_input, item_input], outputs=prediction)
        return model
Example #7
    def get_small_model(self):
        set_kernel_initializer = initializers.VarianceScaling(
            scale=1.0, mode='fan_avg', distribution='normal', seed=None)
        set_bias_initializer = initializers.RandomUniform(minval=-0.5,
                                                          maxval=0.5,
                                                          seed=None)
        model = Sequential()
        model.add(
            Conv2D(30, (self.kernel_size, self.kernel_size),
                   padding='same',
                   input_shape=(self.img_rows, self.img_cols,
                                self.channel_dim),
                   kernel_initializer=set_kernel_initializer,
                   bias_initializer=set_bias_initializer))
        model.add(Activation('tanh'))
        model.add(MaxPooling2D(pool_size=(self.pool_size, self.pool_size)))

        model.add(UpSampling2D(size=(self.pool_size, self.pool_size)))
        model.add(
            Conv2D(3, (self.kernel_size, self.kernel_size),
                   padding='same',
                   kernel_initializer=set_kernel_initializer,
                   bias_initializer=set_bias_initializer))
        model.add(Activation('tanh'))
        return (model)
Example #8
def get_kernel_init(type, param=None, seed=None):
    kernel_init = None
    if type == 'glorot_uniform':
        kernel_init = initializers.glorot_uniform(seed=seed)
    elif type == 'VarianceScaling':
        kernel_init = initializers.VarianceScaling(seed=seed)
    elif type == 'RandomNormal':
        if param is None:
            param = 0.04
        kernel_init = initializers.RandomNormal(mean=0.0,
                                                stddev=param,
                                                seed=seed)
    elif type == 'TruncatedNormal':
        if param is None:
            param = 0.045  # Best for non-normalized coordinates
            # param = 0.09 # "Best" for normalized coordinates
        kernel_init = initializers.TruncatedNormal(mean=0.0,
                                                   stddev=param,
                                                   seed=seed)
    elif type == 'RandomUniform':
        if param is None:
            param = 0.055  # Best for non-normalized coordinates
            # param = ?? # "Best" for normalized coordinates
        kernel_init = initializers.RandomUniform(minval=-param,
                                                 maxval=param,
                                                 seed=seed)

    return kernel_init
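
For reference, several of the named initializers dispatched above are themselves fixed VarianceScaling configurations; a small illustrative sketch of the tf.keras equivalences (exact defaults can differ slightly between Keras versions, so treat this as an assumption rather than part of the snippet):

# Illustrative only: named initializers expressed as VarianceScaling settings.
glorot_uniform = initializers.VarianceScaling(scale=1.0, mode='fan_avg', distribution='uniform')
he_normal = initializers.VarianceScaling(scale=2.0, mode='fan_in', distribution='truncated_normal')
lecun_normal = initializers.VarianceScaling(scale=1.0, mode='fan_in', distribution='truncated_normal')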
Example #9
def get_layer_opts():
    return dict(
        activation='relu',
        kernel_initializer=initializers.VarianceScaling(scale=1.0,
                                                        mode='fan_in',
                                                        distribution='normal',
                                                        seed=seed))
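
A minimal usage sketch for the options above; the model shape and layer sizes are hypothetical, and `seed`, `Sequential`, and `Dense` are assumed to be defined/imported elsewhere in the original module:

# Hypothetical usage: unpack the shared layer options into each Dense layer.
model = Sequential([
    Dense(128, input_shape=(64,), **get_layer_opts()),
    Dense(10, activation='softmax'),
])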
Example #10
def conv2d_bn(X,
              nb_filter,
              num_row,
              num_col,
              padding='same',
              strides=(1, 1),
              use_bias=False):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1

    X = Convolution2D(nb_filter, (num_row, num_col),
                      strides=strides,
                      padding=padding,
                      use_bias=use_bias,
                      kernel_regularizer=regularizers.l2(0.000004),
                      kernel_initializer=initializers.VarianceScaling(
                          scale=2.0,
                          mode='fan_in',
                          distribution='normal',
                          seed=None))(X)
    X = BatchNormalization(axis=channel_axis, momentum=0.999997,
                           scale=False)(X)
    X = Activation('relu')(X)
    return X
Example #11
def conv2d_bn(x,
              nb_filter,
              num_row,
              num_col,
              padding='same',
              strides=(1, 1),
              use_bias=False):
    """
    Utility function to apply conv + BN.
    (Slightly modified from https://github.com/fchollet/keras/blob/master/keras/applications/inception_v3.py)
    """
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    x = Convolution2D(nb_filter, (num_row, num_col),
                      strides=strides,
                      padding=padding,
                      use_bias=use_bias,
                      kernel_regularizer=regularizers.l2(0.00004),
                      kernel_initializer=initializers.VarianceScaling(
                          scale=2.0,
                          mode='fan_in',
                          distribution='normal',
                          seed=None))(x)
    x = BatchNormalization(axis=channel_axis, momentum=0.9997, scale=False)(x)
    x = Activation('relu')(x)
    return x
Example #12
def create_func():
    model = Sequential()

    model.add(
        Conv2D(32, (3, 3),
               padding='same',
               kernel_initializer=initializers.VarianceScaling(),
               input_shape=input_size,
               activation=acti,
               strides=(1, 1)))

    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(BatchNormalization())
    model.add(Dropout(0.25))

    model.add(
        Conv2D(64, (3, 3),
               padding='same',
               kernel_initializer=initializers.VarianceScaling(),
               activation=acti,
               strides=(1, 1)))

    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(BatchNormalization())
    model.add(Dropout(0.25))

    model.add(
        Conv2D(128, (3, 3),
               padding='same',
               kernel_initializer=initializers.VarianceScaling(),
               activation=acti,
               strides=(1, 1)))

    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(BatchNormalization())
    model.add(Dropout(0.25))

    model.add(Flatten())

    model.add(Dense(3, activation='softmax'))
    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()
    return model
Example #13
def create_model():
    n_inputs = num_PC
    n_hidden1 = 10
    n_hidden2 = 10
    n_outputs = 1
    Xavier_init = initializers.VarianceScaling(scale=1.0,
                                               mode='fan_avg',
                                               distribution='normal')

    with tf.name_scope("Ga"):
        hidden_Ga1 = layers.Dense(n_hidden1,
                                  name="hidden_Ga1",
                                  activation='tanh',
                                  kernel_initializer=Xavier_init)
        hidden_Ga2 = layers.Dense(n_hidden2,
                                  name="hidden_Ga2",
                                  activation='tanh',
                                  kernel_initializer=Xavier_init)
        output_Ga = layers.Dense(n_outputs,
                                 name="output_Ga",
                                 activation=None,
                                 kernel_initializer=Xavier_init)

        input_Ga = Input(shape=(20, n_inputs),
                         dtype='float32',
                         name='input_Ga')
        y_Ga = hidden_Ga1(input_Ga)
        y_Ga = hidden_Ga2(y_Ga)
        y_Ga = output_Ga(y_Ga)
        sum_Ga = layers.Lambda(lambda x: backend.sum(x, axis=1),
                               name='sum_Ga')(y_Ga)

    with tf.name_scope("As"):
        hidden_As1 = layers.Dense(n_hidden1,
                                  name="hidden_As1",
                                  activation='tanh',
                                  kernel_initializer=Xavier_init)
        hidden_As2 = layers.Dense(n_hidden2,
                                  name="hidden_As2",
                                  activation='tanh',
                                  kernel_initializer=Xavier_init)
        output_As = layers.Dense(n_outputs,
                                 name="output_As",
                                 activation=None,
                                 kernel_initializer=Xavier_init)

        input_As = Input(shape=(20, n_inputs),
                         dtype='float32',
                         name='input_As')
        y_As = hidden_As1(input_As)
        y_As = hidden_As2(y_As)
        y_As = output_As(y_As)
        sum_As = layers.Lambda(lambda x: backend.sum(x, axis=1),
                               name='sum_As')(y_As)

    total_E = layers.add([sum_Ga, sum_As], name="total_E")
    return Model([input_Ga, input_As], total_E)
Example #14
    def build_model(self):
        """Build a critic (value) network that maps (state, action) pairs -> Q-values."""
        # Define input layers
        states = layers.Input(shape=(self.state_size, ), name='states')
        actions = layers.Input(shape=(self.action_size, ), name='actions')

        # Add hidden layer(s) for state pathway
        # Set up random uniform weight initializer
        rand_uniform = initializers.VarianceScaling(scale=1.0,
                                                    mode='fan_in',
                                                    distribution='uniform')

        net_states = layers.Dense(units=32,
                                  kernel_initializer=rand_uniform)(states)
        net_states = layers.BatchNormalization()(net_states)
        net_states = layers.Activation('relu')(net_states)
        net_states = layers.Dropout(0.3)(net_states)

        # Add hidden layer(s) for action pathway
        net_actions = layers.Dense(units=32,
                                   kernel_initializer=rand_uniform)(actions)
        net_actions = layers.BatchNormalization()(net_actions)
        net_actions = layers.Activation('relu')(net_actions)
        net_actions = layers.Dropout(0.3)(net_actions)

        # Try different layer sizes, activations, add batch normalization, regularizers, etc.

        # Combine state and action pathways
        net = layers.Add()([net_states, net_actions])
        net = layers.Activation('relu')(net)

        # Add more layers to the combined network if needed
        net = layers.Dense(units=16, kernel_initializer=rand_uniform)(net)
        net = layers.BatchNormalization()(net)
        net = layers.Activation('relu')(net)
        net = layers.Dropout(0.3)(net)

        # Add final output layer to produce action values (Q values)
        random_uniform = initializers.RandomUniform(minval=-0.003,
                                                    maxval=0.003)
        Q_values = layers.Dense(units=1, kernel_initializer=random_uniform, kernel_regularizer=regularizers.l2(0.01), \
                activity_regularizer=regularizers.l2(0.01), name='q_values')(net)

        # Create Keras model
        self.model = models.Model(inputs=[states, actions], outputs=Q_values)

        # Define optimizer and compile model for training with built-in loss function
        optimizer = optimizers.Adam(lr=0.001)
        self.model.compile(optimizer=optimizer, loss='mse')

        # Compute action gradients (derivative of Q values w.r.t. to actions)
        action_gradients = K.gradients(Q_values, actions)

        # Define an additional function to fetch action gradients (to be used by actor model)
        self.get_action_gradients = K.function(
            inputs=[*self.model.input, K.learning_phase()],
            outputs=action_gradients)
Example #15
    def build_model(self):
        """Build an actor (policy) network that maps states -> actions."""
        # Define input layer (states)
        states = layers.Input(shape=(self.state_size, ), name='states')

        # Try different layer sizes, activations, add batch normalization, regularizers, etc.
        final_layer_initializer = initializers.RandomUniform(minval=-0.003,
                                                             maxval=0.003,
                                                             seed=None)
        kernel_initializer = initializers.VarianceScaling(
            scale=1.0, mode='fan_in', distribution='uniform', seed=None)
        activation = layers.LeakyReLU(alpha=0.3)
        activation = 'relu'  # note: overrides the LeakyReLU assigned above

        # Add hidden layers
        net = layers.Dense(units=400,
                           activation=activation,
                           kernel_initializer=kernel_initializer)(states)
        net = layers.Dense(units=300,
                           activation=activation,
                           kernel_initializer=kernel_initializer)(net)

        # Add final output layer with sigmoid or tanh activation
        raw_actions = layers.Dense(
            units=self.action_size,
            activation='sigmoid',
            name='raw_actions',
            kernel_initializer=final_layer_initializer)(net)

        #middle_value_of_action_range = self.action_low + self.action_range/2
        #actions = layers.Lambda(lambda x: (x * self.action_range) + middle_value_of_action_range,
        #    name='actions')(raw_actions)

        # Scale [0, 1] output for each action dimension to proper range
        actions = layers.Lambda(lambda x:
                                (x * self.action_range) + self.action_low,
                                name='actions')(raw_actions)

        # Create Keras model
        self.model = models.Model(inputs=states, outputs=actions)

        # Define loss function using action value (Q value) gradients
        action_gradients = layers.Input(shape=(self.action_size, ))
        loss = K.mean(-action_gradients * actions)

        # Incorporate any additional losses here (e.g. from regularizers)

        # Define optimizer and training function
        optimizer = optimizers.Adam(lr=0.0001)
        updates_op = optimizer.get_updates(params=self.model.trainable_weights,
                                           loss=loss)
        self.train_fn = K.function(
            inputs=[self.model.input, action_gradients,
                    K.learning_phase()],
            outputs=[],
            updates=updates_op)
Example #16
    def __init__(self,
                 xtrain,
                 ytrain,
                 xval,
                 yval,
                 xtest,
                 ytest,
                 wi=14,
                 dr=0.4,
                 ac='relu',
                 acpar=0.1,
                 bs=2048):

        # INITIALIZE HYPERPARAMETERS ###
        self.width = wi  # Integer
        self.droprate = dr  # Float 0 <= x < 1
        self.activation = ac  # String 'relu' 'elu' 'sigmoid' etc.
        self.activation_par = acpar
        self.batchsize = bs  # Integer
        self.x_train = xtrain
        self.x_validate = xval
        self.x_test = xtest
        self.y_train = ytrain
        self.y_validate = yval
        self.y_test = ytest

        # GENERATE PATHNAME
        self.name = '{}{}{}{}{}'.format(self.activation, self.batchsize,
                                        self.droprate, self.width,
                                        self.activation_par)
        self.path = '{}{}'.format('../Data/Results/AutoEncoder/', self.name)

        # INITIALIZE CHOICE OF KERAS FUNCTIONS #
        self.model = Sequential()

        self.sgd = optimizers.SGD(lr=0.01, momentum=0.001, decay=0.001)
        self.adagrad = optimizers.Adagrad(lr=0.01, epsilon=None, decay=0.0)
        self.adam = optimizers.Adam(lr=0.001,
                                    beta_1=0.9,
                                    beta_2=0.999,
                                    epsilon=10e-8,
                                    decay=0.001,
                                    amsgrad=False)
        self.cb = callbacks.EarlyStopping(monitor='val_loss',
                                          min_delta=0.0001,
                                          patience=50,
                                          verbose=1,
                                          mode='min',
                                          baseline=None,
                                          restore_best_weights=True)
        # Note: the initializer objects below are instantiated but never assigned
        # to a layer or attribute, so they have no effect as written.
        initializers.VarianceScaling(scale=1.0,
                                     mode='fan_in',
                                     distribution='normal',
                                     seed=None)
        initializers.he_normal(151119)
        initializers.Zeros()
Example #17
def classification_coco(fpn_features, w_head, d_head, num_anchors,
                        num_classes):
    options = {
        'kernel_size': 3,
        'strides': 1,
        'padding': 'same',
        'depthwise_initializer': initializers.VarianceScaling(),
        'pointwise_initializer': initializers.VarianceScaling(),
    }
    cls_convs = [
        layers.SeparableConv2D(filters=w_head,
                               bias_initializer='zeros',
                               name=f'class_net/class-{i}',
                               **options) for i in range(d_head)
    ]
    cls_head_conv = layers.SeparableConv2D(
        filters=num_classes * num_anchors,
        bias_initializer=PriorProbability(probability=3e-4),
        name='class_net/class-predict',
        **options)
    cls_bns = [[
        layers.BatchNormalization(momentum=MOMENTUM,
                                  epsilon=EPSILON,
                                  name=f'class_net/class-{i}-bn-{j}')
        for j in range(3, 8)
    ] for i in range(d_head)]
    cls_relu = layers.Lambda(lambda x: tf.nn.swish(x))
    classification = []
    cls_reshape = layers.Reshape((-1, num_classes))
    cls_activation = layers.Activation('sigmoid')
    for i, feature in enumerate(fpn_features):
        for j in range(d_head):
            feature = cls_convs[j](feature)
            feature = cls_bns[j][i](feature)
            feature = cls_relu(feature)
        feature = cls_head_conv(feature)
        feature = cls_reshape(feature)
        feature = cls_activation(feature)
        classification.append(feature)
    classification = layers.Concatenate(axis=1,
                                        name='classification')(classification)
    return classification
Example #18
def RCL(input, kernel_size, filedepth):
  if K.image_data_format() == 'channels_first':
    channel_axis = 1
  else:
    channel_axis = -1

  conv1 = Convolution2D(filters=filedepth, kernel_size=kernel_size, strides=(1, 1), padding='same',
                 kernel_regularizer=regularizers.l2(0.00004),
                 kernel_initializer=initializers.VarianceScaling(scale=2.0, mode='fan_in', distribution='normal', seed=None))(input)

  stack2 = BatchNormalization(axis=channel_axis, momentum=0.9997, scale=False)(conv1)
  stack2 = Activation('relu')(stack2)

  RCL = Convolution2D(filters=filedepth, kernel_size=kernel_size, strides=(1, 1), padding='same', 
                 kernel_regularizer=regularizers.l2(0.00004),
                 kernel_initializer=initializers.VarianceScaling(scale=2.0, mode='fan_in', distribution='normal', seed=None))

  conv2 = RCL(stack2)
  stack3 = Add()([conv1, conv2])
  stack4 = BatchNormalization(axis=channel_axis, momentum=0.9997, scale=False)(stack3)
  stack4 = Activation('relu')(stack4)


  conv3 = Convolution2D(filters=filedepth, kernel_size=kernel_size, strides=(1, 1), padding='same',
                 weights=RCL.get_weights(),
                 kernel_regularizer=regularizers.l2(0.00004),
                 kernel_initializer=initializers.VarianceScaling(scale=2.0, mode='fan_in', distribution='normal', seed=None))(stack4)
  stack5 = Add()([conv1, conv3])
  stack6 = BatchNormalization(axis=channel_axis, momentum=0.9997, scale=False)(stack5)
  stack6 = Activation('relu')(stack6)


  conv4 = Convolution2D(filters=filedepth, kernel_size=kernel_size, strides=(1, 1), padding='same',
                 weights=RCL.get_weights(),
                 kernel_regularizer=regularizers.l2(0.00004),
                 kernel_initializer=initializers.VarianceScaling(scale=2.0, mode='fan_in', distribution='normal', seed=None))(stack6)
  stack7 = Add()([conv1, conv4])
  stack8 = BatchNormalization(axis=channel_axis, momentum=0.9997, scale=False)(stack7)
  stack8 = Activation('relu')(stack8)

  return stack8
Example #19
    def __init__(self,
                 layer_sizes: List[int],
                 layer_activations: List[Any],
                 state_shape: tuple,
                 action_shape: tuple,
                 layer_and_batch_norm: bool,
                 l2_param_penalty: float = 0.00,
                 **kwargs):

        super().__init__(layer_sizes, layer_activations, state_shape,
                         action_shape, 0, layer_and_batch_norm,
                         l2_param_penalty)

        hidden_init = initializers.VarianceScaling(scale=1 / 3,
                                                   mode='fan_in',
                                                   distribution='uniform',
                                                   seed=None)
        # hidden_init = None
        final_init = initializers.RandomUniform(minval=-3e-3,
                                                maxval=3e-3,
                                                seed=None)

        state = tf.keras.Input(shape=state_shape, name='state_input')
        h = state

        for i in range(len(layer_sizes)):
            h = self.layer_with_layer_norm(h,
                                           i,
                                           'policy',
                                           ln_bias=0.,
                                           initializers=hidden_init)

        ap = layers.Dense(units=action_shape[0],
                          activation='tanh',
                          name='policy_final',
                          bias_initializer=final_init,
                          kernel_initializer=final_init)(h)
        h = state

        for i in range(len(layer_sizes)):
            h = self.layer_with_layer_norm(h,
                                           i,
                                           'noise_policy',
                                           ln_bias=0.,
                                           initializers=hidden_init)

        np = layers.Dense(units=action_shape[0],
                          activation='tanh',
                          name='noise_policy_final',
                          bias_initializer=final_init,
                          kernel_initializer=final_init)(h)

        self.model = tf.keras.Model(inputs=[state], outputs=[ap, np])
Example #20
def properties_sand(fpn_features, w_head, d_head, num_anchors, num_properties):
    options = {
        'kernel_size': 3,
        'strides': 1,
        'padding': 'same',
        'depthwise_initializer': initializers.VarianceScaling(),
        'pointwise_initializer': initializers.VarianceScaling(),
    }
    pro_convs = [
        layers.SeparableConv2D(filters=w_head,
                               bias_initializer='zeros',
                               name=f'property_net/property-{i}',
                               **options) for i in range(d_head)
    ]
    pro_head_conv = layers.SeparableConv2D(
        filters=num_properties * num_anchors,
        bias_initializer='zeros',
        name='property_net/property-predict',
        **options)
    pro_bns = [[
        layers.BatchNormalization(momentum=MOMENTUM,
                                  epsilon=EPSILON,
                                  name=f'property_net/property-{i}-bn-{j}')
        for j in range(3, 8)
    ] for i in range(d_head)]
    pro_relu = layers.Lambda(lambda x: tf.nn.swish(x))
    pro = []
    pro_reshape = layers.Reshape((-1, num_properties))
    pro_activation = layers.Activation('softmax')
    for i, feature in enumerate(fpn_features):
        for j in range(d_head):
            feature = pro_convs[j](feature)
            feature = pro_bns[j][i](feature)
            feature = pro_relu(feature)
        feature = pro_head_conv(feature)
        feature = pro_reshape(feature)
        feature = pro_activation(feature)
        pro.append(feature)
    pro = layers.Concatenate(axis=1, name='pro_sand')(pro)
    return pro
Example #21
 def create_actor_network(self, state_size, action_dim):
     print("Now we build the model")
     S = Input(shape=[state_size])
     h0 = Dense(HIDDEN1_UNITS, activation='relu')(S)
     h1 = Dense(HIDDEN2_UNITS, activation='relu')(h0)
     #Steering = Dense(1,activation='tanh',init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h1)
     #Acceleration = Dense(1,activation='tanh',init=lambda shape, name: normal(shape, scale=1e-4, name=name), kernel_regularizer=regularizers.l2(0.01))(h1)
     Acceleration = Dense(1,
                          activation='tanh',
                          use_bias=True,
                          kernel_initializer=initializers.VarianceScaling(
                              scale=1e-4,
                              mode='fan_in',
                              distribution='normal',
                              seed=None),
                          bias_initializer='zeros',
                          kernel_regularizer=regularizers.l2(0.01),
                          bias_regularizer=None,
                          activity_regularizer=None,
                          kernel_constraint=None,
                          bias_constraint=None)(h1)
     LaneChanging = Dense(1,
                          activation='sigmoid',
                          use_bias=True,
                          kernel_initializer=initializers.VarianceScaling(
                              scale=1e-4,
                              mode='fan_in',
                              distribution='normal',
                              seed=None),
                          bias_initializer='zeros',
                          kernel_regularizer=regularizers.l2(0.01),
                          bias_regularizer=None,
                          activity_regularizer=None,
                          kernel_constraint=None,
                          bias_constraint=None)(h1)
     #Brake = Dense(1,activation='sigmoid',init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h1)
     V = Concatenate()([LaneChanging, Acceleration])
     model = Model(inputs=S, outputs=V)
     return model, model.trainable_weights, S
Example #22
File: cppn.py  Project: wottpal/cppn-keras
def build_model(variance, bw = False, depth = 32):
    """Builds and returns CPPN."""
    input_shape=(4,)
    init = initializers.VarianceScaling(scale=variance)

    model = models.Sequential()
    model.add(layers.Dense(depth, kernel_initializer=init, activation='tanh', input_shape=input_shape))
    model.add(layers.Dense(depth, kernel_initializer=init, activation='tanh'))
    model.add(layers.Dense(depth, kernel_initializer=init, activation='tanh'))
    model.add(layers.Dense(1 if bw else 3, activation='tanh'))
    
    model.compile(optimizer='rmsprop', loss='mse')
    return model
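
A hypothetical call, treating the four inputs as (x, y, r, z) coordinate vectors as is typical for CPPNs; the variance value and the coordinate interpretation are assumptions, not part of the snippet:

# Query the CPPN on a batch of 4-dimensional coordinate vectors.
import numpy as np
cppn = build_model(variance=50.0, bw=False, depth=32)
coords = np.random.uniform(-1.0, 1.0, size=(10, 4)).astype('float32')
colors = cppn.predict(coords)   # shape (10, 3); tanh output lies in [-1, 1]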
Example #23
    def get_model(self):
        # Input Layer
        user_input = Input(shape=(1,), dtype='int32', name='user_input')
        item_input = Input(shape=(1,), dtype='int32', name='item_input')
        text_input = Input(shape=(self.feature_size,), dtype='float32', name='text_input')

        # Embedding layer
        MF_Embedding_User = Embedding(input_dim=self.num_users, output_dim=self.mf_embedding_dim, name='mf_embedding_user',
                                      embeddings_initializer=initializers.VarianceScaling(scale=0.01,distribution='normal'),
                                      embeddings_regularizer=l2(0.01), input_length=1)
        MF_Embedding_Item = Embedding(input_dim=self.num_items, output_dim=self.mf_embedding_dim, name='mf_embedding_item',
                                      embeddings_initializer=initializers.VarianceScaling(scale=0.01,distribution='normal'),
                                      embeddings_regularizer=l2(0.01), input_length=1)
        # MF part
        mf_user_latent = Flatten()(MF_Embedding_User(user_input))
        mf_item_latent = Flatten()(MF_Embedding_Item(item_input))  # why Flatten?
        mf_vector = concatenate([mf_user_latent, mf_item_latent])  # note: concatenation, not the element-wise multiply of standard MF

        for idx in range(len(self.mf_fc_unit_nums)):   # learn non-linear interactions
            layer = Dense(self.mf_fc_unit_nums[idx],  activation='relu', name="layer%d" % idx)
            mf_vector = layer(mf_vector)

        # Text part
        # text_input = Dense(10, activation='relu', kernel_regularizer=l2(0.01))(text_input)  #   sim? should text_input go through an MLP first?

        # Concatenate MF and TEXT parts
        predict_vector = concatenate([mf_vector, text_input])

        for idx in range(len(self.layers2)):   # apply an MLP after merging?
            layer = Dense(self.layers2[idx],  activation='relu')# name="layer%d"  % idx
            predict_vector = layer(predict_vector)

        predict_vector = Dropout(0.5)(predict_vector)    # use dropout?

        # Final prediction layer
        predict_vector = Dense(1, activation='sigmoid', kernel_initializer='lecun_uniform', name="prediction")(predict_vector)

        model = Model(inputs=[user_input, item_input, text_input],outputs=predict_vector)
        return model
Example #24
def getInitializer(init_name, learning_rate, opt, functions):

    if init_name == "rnormal":
        init = initializers.RandomNormal()
    elif init_name == "runiform":
        init = initializers.RandomUniform()
    elif init_name == "varscaling":
        init = initializers.VarianceScaling()
    elif init_name == "orth":
        init = initializers.Orthogonal()
    elif init_name == "id":
        init = initializers.Identity()
    elif init_name == "lecun_uniform":
        init = initializers.lecun_uniform()
    elif init_name == "glorot_normal":
        init = initializers.glorot_normal()
    elif init_name == "glorot_uniform":
        init = initializers.glorot_uniform()
    elif init_name == "he_normal":
        init = initializers.he_normal()
    elif init_name == "he_uniform":
        init = initializers.he_uniform()

    if opt == "Adam":
        optimizer = optimizers.Adam(lr=learning_rate)
    elif opt == "Adagrad":
        optimizer = optimizers.Adagrad(lr=learning_rate)
    elif opt == "Adadelta":
        optimizer = optimizers.Adadelta(lr=learning_rate)
    elif opt == "Adamax":
        optimizer = optimizers.Adamax(lr=learning_rate)
    elif opt == "Nadam":
        optimizer = optimizers.Nadam(lr=learning_rate)
    elif opt == "sgd":
        optimizer = optimizers.SGD(lr=learning_rate)
    elif opt == "RMSprop":
        optimizer = optimizers.RMSprop(lr=learning_rate)

    if functions.startswith("maxout"):
        functions, maxout_k = functions.split("-")
        maxout_k = int(maxout_k)
    else:
        maxout_k = 3
    if functions.startswith("leakyrelu"):
        if "-" in functions:
            functions, maxout_k = functions.split("-")
            maxout_k = float(maxout_k)
        else:
            maxout_k = 0.01

    return init, optimizer, functions, maxout_k
Example #25
def regression_sand(fpn_features, w_head, d_head, num_anchors):
    options = {
        'kernel_size': 3,
        'strides': 1,
        'padding': 'same',
        'depthwise_initializer': initializers.VarianceScaling(),
        'pointwise_initializer': initializers.VarianceScaling(),
    }
    box_convs = [
        layers.SeparableConv2D(filters=w_head,
                               bias_initializer='zeros',
                               name=f'box_net/box-{i}',
                               **options) for i in range(d_head)
    ]
    box_head_conv = layers.SeparableConv2D(filters=4 * num_anchors,
                                           bias_initializer='zeros',
                                           name=f'box_net/box-predict',
                                           **options)
    box_bns = [[
        layers.BatchNormalization(momentum=MOMENTUM,
                                  epsilon=EPSILON,
                                  name=f'box_net/box-{i}-bn-{j}')
        for j in range(3, 8)
    ] for i in range(d_head)]

    box_relu = layers.Lambda(lambda x: tf.nn.swish(x))
    box_reshape = layers.Reshape((-1, 4))
    regression = []
    for i, feature in enumerate(fpn_features):
        for j in range(d_head):
            feature = box_convs[j](feature)
            feature = box_bns[j][i](feature)
            feature = box_relu(feature)
        feature = box_head_conv(feature)
        feature = box_reshape(feature)
        regression.append(feature)
    regression = layers.Concatenate(axis=1, name='regression_sand')(regression)
    return regression
Example #26
def create_model(n_features=8,
                 n_features_cat=3,
                 n_dense_layers=3,
                 activation='tanh',
                 with_bias=False):
    # continuous features
    # [b'PF_dxy', b'PF_dz', b'PF_eta', b'PF_mass', b'PF_puppiWeight', b'PF_charge', b'PF_fromPV', b'PF_pdgId',  b'PF_px', b'PF_py']
    inputs_cont = Input(shape=(maxNPF, n_features), name='input')
    pxpy = Lambda(lambda x: slice(x, (0, 0, n_features - 2), (-1, -1, -1)))(
        inputs_cont)

    embeddings = []
    for i_emb in range(n_features_cat):
        input_cat = Input(shape=(maxNPF, 1), name='input_cat{}'.format(i_emb))
        if i_emb == 0:
            inputs = [inputs_cont, input_cat]
        else:
            inputs.append(input_cat)
        embedding = Embedding(input_dim=emb_input_dim[i_emb],
                              output_dim=emb_out_dim,
                              embeddings_initializer=initializers.RandomNormal(
                                  mean=0., stddev=0.4 / emb_out_dim),
                              name='embedding{}'.format(i_emb))(input_cat)
        embedding = Reshape((maxNPF, 8))(embedding)
        embeddings.append(embedding)

    x = Concatenate()([inputs[0]] + [emb for emb in embeddings])

    for i_dense in range(n_dense_layers):
        x = Dense(8 * 2**(n_dense_layers - i_dense),
                  activation=activation,
                  kernel_initializer='lecun_uniform')(x)
        x = BatchNormalization(momentum=0.95)(x)

    # List of weights. Increase to 3 when operating with biases
    # Expect typical weights to not be of order 1 but somewhat smaller, so apply explicit scaling
    x = Dense(3 if with_bias else 1,
              activation='linear',
              kernel_initializer=initializers.VarianceScaling(scale=0.02))(x)
    #print('Shape of last dense layer', x.shape)

    x = Concatenate()([x, pxpy])
    x = weighted_sum_layer(with_bias,
                           name="weighted_sum" if with_bias else "output")(x)

    if with_bias:
        x = Dense(2, activation='linear', name='output')(x)

    outputs = x
    return inputs, outputs
Example #27
def build_model(values, n_layers):
    init = initializers.VarianceScaling(scale=values['variance'])
    model = models.Sequential()
    model.add(layers.InputLayer(input_shape=(4, )))
    for i in range(1, n_layers + 1):
        n_neurons = int(values['l{}_n'.format(i)])
        activation = values['l{}_a'.format(i)]
        model.add(
            layers.Dense(n_neurons,
                         kernel_initializer=init,
                         activation=activation))
    model.add(layers.Dense(3, activation=values['lout_a']))
    model.compile(optimizer='rmsprop', loss='mse')

    return model
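
The expected keys of `values` can be read off the lookups above; a hypothetical call for a two-layer network:

# Hypothetical hyperparameter dict matching the 'variance', 'l{i}_n', 'l{i}_a' and 'lout_a' lookups.
values = {
    'variance': 1.0,
    'l1_n': 16, 'l1_a': 'relu',
    'l2_n': 16, 'l2_a': 'tanh',
    'lout_a': 'linear',
}
model = build_model(values, n_layers=2)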
Example #28
 def build_network(self):
     initialisation = initializers.VarianceScaling(scale=self.variance,
                                                   distribution="normal",
                                                   seed=int(self.seed))
     inputs = Input(shape=(4, ))
     x = inputs
     for layer_number in range(self.layer_list.count()):
         x = Dense(
             self.unit_list[layer_number].value(),
             activation=self.activation_list[layer_number].currentText(),
             kernel_initializer=initialisation)(x)
     output = Dense(3,
                    activation='linear',
                    kernel_initializer=initialisation)(x)
     self.model = Model(inputs=inputs, outputs=output)
Example #29
    def build_model(self):
        """Build an actor (policy) network that maps states -> actions."""
        # Define input layer (states)
        states = layers.Input(shape=(self.state_size, ), name='states')

        # Try different layer sizes, activations, add batch normalization, regularizers, etc.
        final_layer_initializer = initializers.RandomUniform(minval=-0.003,
                                                             maxval=0.003,
                                                             seed=None)
        kernel_initializer = initializers.VarianceScaling(
            scale=1.0, mode='fan_in', distribution='uniform', seed=None)

        # Add hidden layers
        net = layers.Dense(units=400,
                           activation='elu',
                           kernel_initializer=kernel_initializer)(states)
        net = layers.Dense(units=300,
                           activation='elu',
                           kernel_initializer=kernel_initializer)(net)

        # Add final output layer with tanh activation - this already outputs actions in desired range -1 to 1
        actions = layers.Dense(units=self.action_size,
                               activation='tanh',
                               name='raw_actions',
                               kernel_initializer=final_layer_initializer)(net)

        # Create Keras model
        self.model = models.Model(inputs=states, outputs=actions)

        # Define loss function using action value (Q value) gradients
        action_gradients = layers.Input(shape=(self.action_size, ))
        loss = K.mean(-action_gradients * actions)

        # Incorporate any additional losses here (e.g. from regularizers)

        # Define optimizer and training function
        optimizer = optimizers.Adam(lr=0.0001)
        updates_op = optimizer.get_updates(params=self.model.trainable_weights,
                                           loss=loss)
        self.train_fn = K.function(
            inputs=[self.model.input, action_gradients,
                    K.learning_phase()],
            outputs=[],
            updates=updates_op)
Example #30
File: inception.py  Project: wsyxbcl/ENML
def conv1d_bn(x, nb_filter, len_filter, padding='same', strides=1):
    """
    Utility function to apply conv + BN. 
    (Slightly modified from https://github.com/kentsommer/keras-inceptionV4/inception_v4.py)

    """
    channel_axis = -1
    x = Conv1D(
        nb_filter,
        len_filter,
        strides=strides,
        padding=padding,
        kernel_regularizer=regularizers.l2(0.00004),
        kernel_initializer=initializers.VarianceScaling(scale=2.0,
                                                        mode='fan_in',
                                                        distribution='normal',
                                                        seed=None))(x)
    # x = BatchNormalization(axis=channel_axis, momentum=0.9997, scale=False)(x)
    x = Activation('relu')(x)
    return x