Example #1
    def __init__(self,
                 action_bound,
                 input_shapes,
                 actions_output_shape=(4, ),
                 scope='actor_model_1',
                 cfg='actor_critic_1'):
        with tf.name_scope(scope):
            super(ActorModel, self).__init__(input_shapes, scope, cfg)

            dense_layer_sizes = self.config['dense_layers']['sizes']
            output_layer_params = self.config['output_params']

            # bound used to clip/scale the actions
            self.action_bound = action_bound

            # final dense layer for features
            self.dense5_features = \
                self.make_dense_layer(dense_layer_sizes['d5'])

            # output layer producing the action vector
            self.dense6 = \
                Dense(
                    units=actions_output_shape[0],
                    kernel_initializer=random_uniform(
                        minval=-output_layer_params['noise'],
                        maxval=output_layer_params['noise']),
                    bias_initializer=random_uniform(
                        -output_layer_params['noise'],
                        output_layer_params['noise']),
                    kernel_regularizer=l2(output_layer_params['l2']))
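Initializing the output layer inside a small ±noise band is the classic DDPG recipe for keeping the policy's initial actions near zero. A minimal standalone sketch of the same pattern (the 0.003 bound, the l2 weight, and the TF2-style imports are assumptions; the snippet above reads its values from a config):

from tensorflow.keras.layers import Dense
from tensorflow.keras.initializers import RandomUniform
from tensorflow.keras.regularizers import l2

noise = 0.003  # assumed; the snippet uses config['output_params']['noise']
action_out = Dense(
    units=4,
    kernel_initializer=RandomUniform(minval=-noise, maxval=noise),
    bias_initializer=RandomUniform(minval=-noise, maxval=noise),
    kernel_regularizer=l2(0.01))  # l2 weight likewise assumed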
Example #2
    def make_conv2d_layer(self, filters, kernel_size, input_shape=None):
        """
        Makes a convolutional layer based on filter and kernel size and
        enforces an input shape if required

        Parameters
        ----------
        filters : int
            Filter size
        kernel_size: int
            Kernel size
        input_shape: tuple
            Shape of the input to the layer
        """
        noise = 1.0 / np.sqrt(filters * (kernel_size + 1))
        if input_shape is None:
            return Conv2D(filters=filters,
                          kernel_size=kernel_size,
                          kernel_initializer=random_uniform(minval=-noise,
                                                            maxval=noise),
                          bias_initializer=random_uniform(minval=-noise,
                                                          maxval=noise))
        else:
            return Conv2D(filters=filters,
                          kernel_size=kernel_size,
                          input_shape=input_shape,
                          kernel_initializer=random_uniform(minval=-noise,
                                                            maxval=noise),
                          bias_initializer=random_uniform(minval=-noise,
                                                          maxval=noise))
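Since the two branches differ only in the input_shape argument, the helper can be written once with a conditional kwargs dict; a sketch assuming TF2-style Keras imports (the example shape is illustrative):

import numpy as np
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.initializers import RandomUniform

def make_conv2d_layer(filters, kernel_size, input_shape=None):
    # same uniform bound as above, derived from filter and kernel size
    noise = 1.0 / np.sqrt(filters * (kernel_size + 1))
    init = RandomUniform(minval=-noise, maxval=noise)
    kwargs = {} if input_shape is None else {'input_shape': input_shape}
    return Conv2D(filters=filters, kernel_size=kernel_size,
                  kernel_initializer=init, bias_initializer=init, **kwargs)

conv = make_conv2d_layer(32, 3, input_shape=(84, 84, 4))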
Example #3
    def make_dense_layer(self, units, input_shape=None):
        """
        Makes a dense layer based on the number of layer units and enforces
        an input shape if required

        Parameters
        ----------
        units : int
            Size of the layer
        input_shape: tuple
            Shape of the input to the layer
        """
        noise = 1.0 / np.sqrt(units)
        if input_shape is None:
            return \
                Dense(
                    units=units,
                    kernel_initializer=random_uniform(-noise, noise),
                    bias_initializer=random_uniform(-noise, noise))
        else:
            return \
                Dense(
                    units=units,
                    input_shape=input_shape,
                    kernel_initializer=random_uniform(-noise, noise),
                    bias_initializer=random_uniform(-noise, noise))
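A quick check of the resulting bound (the 128-wide input is an arbitrary assumption): for units=64 the limit is 1/sqrt(64) = 0.125, and every sampled parameter should fall inside it:

import numpy as np
from tensorflow.keras.layers import Dense
from tensorflow.keras.initializers import RandomUniform

layer = Dense(64, kernel_initializer=RandomUniform(-0.125, 0.125),
              bias_initializer=RandomUniform(-0.125, 0.125))
layer.build((None, 128))
kernel, bias = layer.get_weights()
assert np.abs(kernel).max() <= 0.125 and np.abs(bias).max() <= 0.125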
Example #4
    def __init__(self, vocabulary, output_dim, filters=(32, 32, 64, 128, 256, 512, 1024),
                 kernels=(1, 2, 3, 4, 5, 6, 7), char_dim=16, activation='tanh', highways=2,
                 embeddings_initializer=initializers.random_uniform(-1., 1.), max_len=50, reserved_words=None,
                 **kwargs):

        _reserved_words = [self.BOW_MARK, self.EOW_MARK]
        _reserved_words += [] if reserved_words is None else [r for r in reserved_words if r not in _reserved_words]
        _max_len = None if max_len is None else max_len - 2
        super().__init__(
            vocabulary, output_dim, embeddings_initializer=embeddings_initializer, max_len=_max_len,
            reserved_words=_reserved_words, **kwargs)
        self.max_len = _max_len

        if not filters or not isinstance(filters, (list, tuple)) or not all(isinstance(x, int) for x in filters):
            raise ValueError('Expected "filters" argument to be a list or tuple of integers')
        if not kernels or not isinstance(kernels, (list, tuple)) or not all(isinstance(x, int) for x in kernels):
            raise ValueError('Expected "kernels" argument to be a list or tuple of integers')
        if len(filters) != len(kernels):
            raise ValueError('Sizes of "filters" and "kernels" should be equal')
        self.filters = filters
        self.kernels = kernels

        self.char_dim = char_dim
        self.activation = activations.get(activation)
        self.highways = highways
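The default filters and kernels above are tuples, which is why the type checks accept both sequence types; a list-only isinstance check would reject the defaults themselves:

filters = (32, 32, 64, 128, 256, 512, 1024)
assert isinstance(filters, (list, tuple))  # passes
assert not isinstance(filters, list)       # a list-only check rejects the tuple default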
Example #5
    def __init__(self,
                 num_capsule,
                 dim_capsule,
                 routings=3,
                 kernel_initializer='glorot_uniform',
                 **kwargs):
        super(CapsuleLayer, self).__init__(**kwargs)
        self.num_capsule = num_capsule
        self.dim_capsule = dim_capsule
        self.routings = routings
        self.kernel_initializer = initializers.random_uniform(-1, 1)
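Note that this __init__ discards the kernel_initializer argument in favor of a fixed ±1 uniform range; to honor the argument instead, the usual Keras idiom is:

        self.kernel_initializer = initializers.get(kernel_initializer)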
Example #6
    def __init__(self, input_dim: int, output_dim: int):
        """Constructor for PolicyGradient_tf class.
        
        Args:
            input_dim (int): Input dimension (number of features).
            output_dim (int): Output dimension (number of responses).
        """

        super().__init__()

        self.input_dim = input_dim
        self.output_dim = output_dim

        self.batchNorm1 = layers.BatchNormalization()
        self.dense1 = layers.Dense(
            64, input_shape=(input_dim+output_dim,),
            kernel_initializer=random_uniform(-np.sqrt(1/input_dim), np.sqrt(1/input_dim))
        )
        self.relu1 = layers.Activation('relu')
        self.dense2 = layers.Dense(32, kernel_initializer=random_uniform(-np.sqrt(1/64), np.sqrt(1/64)))
        self.relu2 = layers.Activation('relu')
        self.dense3 = layers.Dense(output_dim, kernel_initializer=random_uniform(-np.sqrt(1/32), np.sqrt(1/32)))
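Only the layers are defined above; a minimal forward pass wiring them in order might look like this (a sketch; the training-flag handling is an assumption):

    def call(self, inputs, training=False):
        x = self.batchNorm1(inputs, training=training)
        x = self.relu1(self.dense1(x))
        x = self.relu2(self.dense2(x))
        return self.dense3(x)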
Example #7
# %%
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import initializers
from tensorflow.keras.optimizers import Adam

# Define the network
struct = [3, 50, 50, 50, 50, 1]
# Define the optimizer
optimizer = Adam(learning_rate=0.001)
tf.keras.backend.set_floatx("float64")
model = keras.models.Sequential()
# Xavier (Glorot) uniform initialization
for layer in range(len(struct) - 1):
    val = tf.cast(tf.sqrt(6 / (struct[layer] + struct[layer + 1])),
                  dtype=tf.float64)
    if layer == len(struct) - 2:
        model.add(
            keras.layers.Dense(units=struct[layer + 1],
                               kernel_initializer=initializers.random_uniform(
                                   -val, val)))
    elif layer == 0:
        model.add(
            keras.layers.Dense(units=struct[layer + 1],
                               input_dim=struct[layer],
                               activation='tanh',
                               kernel_initializer=initializers.random_uniform(
                                   -val, val)))
    else:
        model.add(
            keras.layers.Dense(units=struct[layer + 1],
                               activation='tanh',
                               kernel_initializer=initializers.random_uniform(
                                   -val, val)))
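The ±sqrt(6 / (fan_in + fan_out)) limit computed above is exactly the Glorot (Xavier) uniform scheme, so each hidden layer could equivalently use the built-in initializer:

model.add(keras.layers.Dense(units=50, activation='tanh',
                             kernel_initializer=initializers.GlorotUniform()))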

# %%
# Declare the training data as tf.Variable tensors to use as network inputs
x = tf.Variable(tf.reshape(tf.cast(xyt_train[:, 0], dtype=tf.float64), [xyt_train.shape[0], 1]))
y = tf.Variable(tf.reshape(tf.cast(xyt_train[:, 1], dtype=tf.float64), [xyt_train.shape[0], 1]))
t = tf.Variable(tf.reshape(tf.cast(xyt_train[:, 2], dtype=tf.float64), [xyt_train.shape[0], 1]))
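These variables feed the residual defined in the next cell; a sketch of how its second derivatives could be obtained with nested gradient tapes (standard TF2; the Psi names are assumptions):

with tf.GradientTape(persistent=True) as tape2:
    with tf.GradientTape(persistent=True) as tape1:
        Psi = model(tf.concat([x, y, t], axis=1))
    # first derivatives, recorded by the outer tape
    Psi_x = tape1.gradient(Psi, x)
    Psi_y = tape1.gradient(Psi, y)
    Psi_t = tape1.gradient(Psi, t)
# second derivatives
Psi_xx = tape2.gradient(Psi_x, x)
Psi_yy = tape2.gradient(Psi_y, y)
del tape1, tape2  # release the persistent tapes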


# %%
model.summary()


# %%
# Define the PDE residual (the derivative-based loss term)
def residual(Psi_xx, Psi_yy, Psi_t):
    ...  # body not included in the snippet
Example #8
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from keras.activations import relu, softmax
from keras.initializers import random_uniform, lecun_uniform, ones, zeros


def plot_weight_matrices(W_layers):
    # hypothetical name and loop header; only the loop body and the closing
    # plt calls survive from the original snippet
    for n, W in enumerate(W_layers):
        plt.subplot(1, len(W_layers), n + 1)
        plt.imshow(W)
        plt.title(f'Layer {n}')
        plt.axis('off')

    plt.colorbar()
    plt.suptitle('Weight matrices variation')

    plt.show()


model = Sequential([
    Dense(
        units=4,
        input_shape=(4, ),
        activation=relu,
        trainable=False,  # to freeze the layer
        kernel_initializer=random_uniform(),
        bias_initializer=ones()),
    Dense(units=2,
          activation=relu,
          kernel_initializer=lecun_uniform(),
          bias_initializer=zeros()),
    Dense(units=4, activation=softmax)
])

model.summary()

W0_layers = get_weights(model)
b0_layers = get_biases(model)
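get_weights and get_biases are not defined anywhere in the snippet; minimal helpers consistent with how they are called (the bodies are assumptions):

def get_weights(model):
    # kernel is the first tensor returned by Keras layer.get_weights()
    return [layer.get_weights()[0] for layer in model.layers]

def get_biases(model):
    # bias is the second tensor returned by Keras layer.get_weights()
    return [layer.get_weights()[1] for layer in model.layers]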

X_train = np.random.random((100, 4))
y_train = X_train  # identity mapping target (autoencoder-style)