Code Example #1
# Assumed imports for this TF 1.x snippet (not shown in the source):
#   import tensorflow as tf
#   slim = tf.contrib.slim
#   initializers = tf.initializers
def _shadowdata_generator_model(netinput, is_training=True):
    """1-D residual generator over spectral bands: SAME-padded convolutions
    with shrinking-then-growing kernels and dense skip connections."""
    with slim.arg_scope(
        [slim.conv2d, slim.conv2d_transpose, slim.convolution1d],
            # weights_initializer=initializers.variance_scaling(scale=2.0),
            weights_initializer=initializers.zeros(),  # zero init: each residual block starts as the identity
            # weights_regularizer=slim.l1_l2_regularizer(),
            # normalizer_fn=slim.batch_norm,
            # normalizer_params={'is_training': is_training, 'decay': 0.95},
            # normalizer_fn=slim.instance_norm,
            # normalizer_params={'center': True, 'scale': True, 'epsilon': 0.001},
            activation_fn=(lambda inp: slim.nn.leaky_relu(inp, alpha=0.1)),
            trainable=is_training,
            data_format="NHWC"):
        num_filters = 1
        band_size = netinput.get_shape()[3].value
        kernel_size = band_size

        net0 = tf.expand_dims(tf.squeeze(netinput, axis=[1, 2]), axis=2)  # [B, 1, 1, C] -> [B, C, 1]
        net1 = slim.convolution1d(net0,
                                  num_filters,
                                  kernel_size,
                                  padding='SAME')
        net1 = net1 + net0

        net2 = slim.convolution1d(net1,
                                  num_filters,
                                  kernel_size // 2,
                                  padding='SAME')
        net2 = net2 + net1 + net0

        net3 = slim.convolution1d(net2,
                                  num_filters,
                                  kernel_size // 4,
                                  padding='SAME')
        net3 = net3 + net2 + net1

        net4 = slim.convolution1d(net3,
                                  num_filters,
                                  kernel_size // 8,
                                  padding='SAME')
        net4 = net4 + net3 + net2

        net5 = slim.convolution1d(net4,
                                  num_filters,
                                  kernel_size // 4,
                                  padding='SAME')
        net5 = net5 + net4 + net3

        net6 = slim.convolution1d(net5,
                                  num_filters,
                                  kernel_size // 2,
                                  padding='SAME')
        net6 = net6 + net5 + net4

        net7 = slim.convolution1d(net6,
                                  num_filters,
                                  kernel_size,
                                  padding='SAME',
                                  normalizer_fn=None,
                                  normalizer_params=None,
                                  weights_regularizer=None,
                                  activation_fn=None)
        flatten = slim.flatten(net7)
        # net9 = slim.fully_connected(flatten, band_size, activation_fn=None)
    return tf.expand_dims(tf.expand_dims(flatten, axis=1), axis=1)
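
A minimal usage sketch (assumed shapes and names, not from the source): the generator expects an NHWC input of shape [batch, 1, 1, bands] and returns a tensor of the same shape.

# Usage sketch; 64 bands is an illustrative choice
hyper_pixels = tf.placeholder(tf.float32, [None, 1, 1, 64])
de_shadowed = _shadowdata_generator_model(hyper_pixels, is_training=False)
# de_shadowed has shape [None, 1, 1, 64], matching the input layout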
Code Example #2

# Assumed context (TF 1.x, not shown in the source): layers = tf.layers,
# init = tf.initializers, l2_regularizer from tf.contrib.layers,
# get_regularization_loss = tf.losses.get_regularization_loss;
# trainX/trainY, neurons, NUM_FEATURES and NUM_FOLDS are defined upstream.
# experiment with small datasets
trainX = trainX[:1000]
trainY = trainY[:1000]

for neuron_count in neurons:
    tf.reset_default_graph()
    with tf.Session() as sess:

        # Create the model
        lr = tf.placeholder(tf.float32, [])
        x = tf.placeholder(tf.float32, [None, NUM_FEATURES])
        y_ = tf.placeholder(tf.float32, [None, 1])

        # Build the graph for the deep net
        hidden_layer = layers.dense(x, neuron_count, activation=tf.nn.relu, kernel_initializer=init.orthogonal(np.sqrt(2)),
                                    bias_initializer=init.zeros(), kernel_regularizer=l2_regularizer(1e-3))
        output_layer = layers.dense(hidden_layer, 1, kernel_initializer=init.orthogonal(), bias_initializer=init.zeros(),
                                    kernel_regularizer=l2_regularizer(1e-3))

        loss = tf.reduce_mean(tf.square(y_ - output_layer)) + get_regularization_loss()

        # Create the gradient descent optimizer with the given learning rate.
        optimizer = tf.train.GradientDescentOptimizer(lr)
        train_op = optimizer.minimize(loss)
        error = tf.reduce_mean(tf.square(y_ - output_layer))

        fold_errors = []
        for o in range(NUM_FOLDS):
            sess.run(tf.global_variables_initializer())
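            # Assumed continuation (the snippet is truncated here): a few
            # illustrative training steps, then record the fold error. A real
            # run would slice per-fold train/validation sets from trainX/trainY.
            for _ in range(100):  # illustrative epoch count
                sess.run(train_op, feed_dict={x: trainX, y_: trainY, lr: 1e-3})
            fold_errors.append(
                sess.run(error, feed_dict={x: trainX, y_: trainY}))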
Code Example #3
# Assumed context as in Example #2; X_data, Y_data, m and learning_rate are
# defined upstream.
trainX, trainY = X_data[m:], Y_data[m:]

# experiment with small datasets
trainX = trainX[:1000]
trainY = trainY[:1000]

# Create the model
x = tf.placeholder(tf.float32, [None, NUM_FEATURES])
y_ = tf.placeholder(tf.float32, [None, 1])

# Build the graph for the deep net
hidden_layer = layers.dense(x,
                            100,
                            activation=tf.nn.relu,
                            kernel_initializer=init.orthogonal(np.sqrt(2)),
                            bias_initializer=init.zeros(),
                            kernel_regularizer=l2_regularizer(1e-3))
output_layer = layers.dense(hidden_layer,
                            1,
                            kernel_initializer=init.orthogonal(),
                            bias_initializer=init.zeros(),
                            kernel_regularizer=l2_regularizer(1e-3))

loss = tf.reduce_mean(tf.square(y_ - output_layer)) + get_regularization_loss()

# Create the gradient descent optimizer with the given learning rate.
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train_op = optimizer.minimize(loss)
error = tf.reduce_mean(tf.square(y_ - output_layer))

with tf.Session() as sess:
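    # Assumed continuation (truncated in the source): initialize variables and
    # run a few illustrative training steps.
    sess.run(tf.global_variables_initializer())
    for _ in range(100):  # illustrative step count
        sess.run(train_op, feed_dict={x: trainX, y_: trainY})
    print('train error:', sess.run(error, feed_dict={x: trainX, y_: trainY}))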
Code Example #4
# Excerpt: __init__ of an actor-critic (DDPG-style) network class; assumes
# import numpy as np, import tensorflow as tf, from tensorflow import layers,
# initializers = tf.initializers, and gym's spaces module.
    def __init__(self, name, session, observation_space, action_space):
        self.name = name

        assert isinstance(action_space, spaces.Box)
        assert len(action_space.shape) == 1
        # We assume symmetric action bounds.
        assert (np.abs(action_space.low) == action_space.high).all()

        self.session = session

        with tf.variable_scope(self.name):
            self.observations_input = tf.placeholder(
                observation_space.dtype,
                shape=tuple([None] + list(observation_space.shape)),
                name="observations_input")

            self.actions_input = tf.placeholder(
                action_space.dtype,
                shape=tuple([None] + list(action_space.shape)),
                name="actions_input")

            self.actions_size = action_space.shape[0]

            self.combined_input = tf.concat(
                [self.observations_input, self.actions_input],
                axis=1,
                name="combined_input")

            with tf.variable_scope("actor"):
                self.actor01 = layers.dense(
                    inputs=self.observations_input,
                    units=64,
                    activation=tf.tanh,
                    name="layer_one",
                    bias_initializer=initializers.zeros(),
                    kernel_initializer=initializers.orthogonal(
                        gain=np.sqrt(2)))

                self.actor02 = layers.dense(
                    inputs=self.actor01,
                    units=64,
                    activation=tf.tanh,
                    name="layer_two",
                    bias_initializer=initializers.zeros(),
                    kernel_initializer=initializers.orthogonal(
                        gain=np.sqrt(2)))

                self.actor_head = layers.dense(
                    inputs=self.actor02,
                    units=self.actions_input.shape[1].value,
                    activation=tf.tanh,
                    name="head",
                    bias_initializer=initializers.zeros(),
                    kernel_initializer=initializers.orthogonal(gain=0.01))

                self.actor_head_rescaled = self.actor_head * action_space.high

            self.model_computed_combined = tf.concat(
                [self.observations_input, self.actor_head_rescaled],
                axis=1,
                name="model_computed_combined")

            with tf.variable_scope("critic"):
                self.critic01 = layers.dense(
                    inputs=self.combined_input,
                    units=64,
                    activation=tf.tanh,
                    name="layer_one",
                    bias_initializer=initializers.zeros(),
                    kernel_initializer=initializers.orthogonal(
                        gain=np.sqrt(2)))

                self.critic02 = layers.dense(
                    inputs=self.critic01,
                    units=64,
                    activation=tf.tanh,
                    name="layer_two",
                    bias_initializer=initializers.zeros(),
                    kernel_initializer=initializers.orthogonal(
                        gain=np.sqrt(2)))

                self.action_value_head = layers.dense(
                    inputs=self.critic02,
                    units=1,
                    activation=None,
                    name="head",
                    bias_initializer=initializers.zeros(),
                    kernel_initializer=initializers.random_uniform(
                        -3.0e-3, 3.0e-3))

                self.model_critic01 = layers.dense(
                    inputs=self.model_computed_combined,
                    units=64,
                    activation=tf.tanh,
                    name="layer_one",
                    bias_initializer=initializers.zeros(),
                    kernel_initializer=initializers.orthogonal(
                        gain=np.sqrt(2)),
                    reuse=True)  # use the same weights for 'critic' and 'model critic'

                self.model_critic02 = layers.dense(
                    inputs=self.model_critic01,
                    units=64,
                    activation=tf.tanh,
                    name="layer_two",
                    bias_initializer=initializers.zeros(),
                    kernel_initializer=initializers.orthogonal(
                        gain=np.sqrt(2)),
                    reuse=True)  # use the same weights for 'critic' and 'model critic'

                self.state_value_head = layers.dense(
                    inputs=self.model_critic02,
                    units=1,
                    activation=None,
                    name="head",
                    bias_initializer=initializers.zeros(),
                    kernel_initializer=initializers.random_uniform(
                        -3.0e-3, 3.0e-3),
                    reuse=True)  # use the same weights for 'critic' and 'model critic'
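
The three reuse=True calls make the "model critic" re-apply the critic's weights to the actor's proposed actions, so Q(s, a) and Q(s, pi(s)) share one set of parameters. A minimal standalone sketch of this TF 1.x variable-sharing pattern (hypothetical names, not from the source):

import tensorflow as tf
from tensorflow import layers

a = tf.placeholder(tf.float32, [None, 8])
b = tf.placeholder(tf.float32, [None, 8])
with tf.variable_scope("critic"):
    q_a = layers.dense(a, 4, name="layer_one")              # creates critic/layer_one
    q_b = layers.dense(b, 4, name="layer_one", reuse=True)  # reuses the same kernel/bias
assert len(tf.trainable_variables()) == 2  # one shared kernel, one shared bias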
Code Example #5
# Assumed context as in Example #2; NUM_CLASSES and learning_rate are defined
# upstream.
# experiment with small datasets
trainX = trainX[:1000]
trainX = (trainX - np.mean(trainX, axis=0)) / np.std(trainX, axis=0)
trainY = trainY[:1000]

n = trainX.shape[0]


# Create the model
x = tf.placeholder(tf.float32, [None, NUM_FEATURES])
y_ = tf.placeholder(tf.float32, [None, NUM_CLASSES])

# Build the graph for the deep net
hidden_layer = layers.dense(x, 10, activation=tf.nn.selu, kernel_initializer=init.orthogonal(),
                            bias_initializer=init.zeros(),
                            kernel_regularizer=l2_regularizer(1e-6))
# Bug fix: feed hidden_layer (not x) into the output layer; as written in the
# source, the hidden layer was dead code.
output_layer = layers.dense(hidden_layer, NUM_CLASSES, kernel_initializer=init.orthogonal(),
                            bias_initializer=init.zeros(),
                            kernel_regularizer=l2_regularizer(1e-6))

cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=output_layer)
loss = tf.reduce_mean(cross_entropy) + get_regularization_loss()

# Create the gradient descent optimizer with the given learning rate.
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train_op = optimizer.minimize(loss)

correct_prediction = tf.cast(tf.equal(tf.argmax(output_layer, 1), tf.argmax(y_, 1)), tf.float32)
accuracy = tf.reduce_mean(correct_prediction)

with tf.Session() as sess:
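    # Assumed continuation (truncated in the source): initialize, take a few
    # illustrative training steps, then report accuracy on the training set.
    sess.run(tf.global_variables_initializer())
    for _ in range(100):  # illustrative step count
        sess.run(train_op, feed_dict={x: trainX, y_: trainY})
    print('accuracy:', sess.run(accuracy, feed_dict={x: trainX, y_: trainY}))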
Code Example #6
# experiment with small datasets
trainX = trainX[:1000]
trainY = trainY[:1000]

n = trainX.shape[0]


# Create the model
x = tf.placeholder(tf.float32, [None, NUM_FEATURES])
y_ = tf.placeholder(tf.float32, [None, NUM_CLASSES])

# Build the graph for the deep net

hidden_layer = layers.dense(x, 10, activation=tf.nn.relu,
                            kernel_initializer=init.orthogonal(np.sqrt(2)),
                            bias_initializer=init.zeros(),
                            kernel_regularizer=l2_regularizer(1e-6))
output_layer = layers.dense(hidden_layer, NUM_CLASSES,  # the source hard-coded 6; NUM_CLASSES keeps it consistent with y_
                            kernel_initializer=init.orthogonal(),
                            bias_initializer=init.zeros(),
                            kernel_regularizer=l2_regularizer(1e-6))

cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(
    labels=y_, logits=output_layer)
loss = tf.reduce_mean(cross_entropy) + get_regularization_loss()


# Create the gradient descent optimizer with the given learning rate.
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train_op = optimizer.minimize(loss)

correct_prediction = tf.cast(
    tf.equal(tf.argmax(output_layer, 1), tf.argmax(y_, 1)), tf.float32)
accuracy = tf.reduce_mean(correct_prediction)
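
softmax_cross_entropy_with_logits_v2 expects one-hot labels, so y_ must be fed [batch, NUM_CLASSES] arrays. A small preprocessing sketch (assuming integer class labels, not from the source):

import numpy as np
int_labels = np.array([0, 2, 5])           # illustrative integer labels
one_hot = np.eye(NUM_CLASSES)[int_labels]  # shape [3, NUM_CLASSES]
# then: sess.run(train_op, feed_dict={x: batch_x, y_: one_hot})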
Code Example #7
# Excerpt from a custom layer class; assumes from typing import Union and
# init = tf.initializers.
    def build(self, input_shape: Union[list, tuple]):
        super().build(input_shape)
        # Create per-output log-std parameters, zero-initialized.
        self.__create_weights__(init.zeros()(self.__num_outputs__),
                                'log_std_params')