Ejemplo n.º 1
0
def _shadowdata_discriminator_model(generated_data,
                                    generator_input,
                                    is_training=True):
    """Discriminator over generated shadow data using two 1-D convolutions.

    Args:
        generated_data: 4-D tensor [batch, 1, 1, band_size] produced by the
            generator (singleton spatial dims are squeezed away below).
        generator_input: unused here; kept for the GAN discriminator signature.
        is_training: unused (normalizers are commented out); kept for API parity.

    Returns:
        4-D tensor [batch, 1, 1, features] of discriminator logits.
    """
    with slim.arg_scope(
        [slim.fully_connected, slim.separable_conv2d, slim.convolution1d],
            weights_initializer=initializers.variance_scaling(scale=2.0),
            weights_regularizer=slim.l2_regularizer(0.001),
            # normalizer_fn=slim.batch_norm,
            # normalizer_params={'is_training': is_training, 'decay': 0.999},
            # normalizer_fn=slim.instance_norm,
            # normalizer_params={'center': True, 'scale': True, 'epsilon': 0.001},
            activation_fn=(lambda inp: slim.nn.leaky_relu(inp, alpha=0.1))):
        band_size = generated_data.get_shape()[3].value

        net = generated_data
        # [B, 1, 1, band] -> [B, band] -> [B, band, 1] for conv1d over bands.
        net = tf.squeeze(net, axis=[1, 2])
        net = tf.expand_dims(net, axis=2)

        net1 = slim.convolution1d(net, band_size, band_size, padding='VALID')

        # BUG FIX: bare `transpose` was an undefined name — qualify as
        # tf.transpose (swap the length and channel axes before the 2nd conv).
        net2 = slim.convolution1d(tf.transpose(net1, perm=[0, 2, 1]),
                                  band_size,
                                  band_size,
                                  padding='VALID',
                                  normalizer_fn=None,
                                  normalizer_params=None,
                                  activation_fn=None)

    return tf.expand_dims(tf.expand_dims(slim.flatten(net2), axis=1), axis=1)
Ejemplo n.º 2
0
def _shadowdata_generator_model_simple(netinput, is_training=True):
    """Minimal generator: a single SAME-padded 1-D convolution over the bands.

    Args:
        netinput: 4-D tensor [batch, 1, 1, band_size].
        is_training: whether the created variables are trainable.

    Returns:
        4-D tensor [batch, 1, 1, band_size] (flattened conv output re-expanded).
    """
    with slim.arg_scope(
        [slim.conv2d, slim.conv2d_transpose, slim.convolution1d],
            trainable=is_training,
            data_format="NHWC"):
        band_count = netinput.get_shape()[3].value
        # Collapse singleton spatial dims, then add a channel axis for conv1d.
        conv_in = tf.expand_dims(tf.squeeze(netinput, axis=[1, 2]), axis=2)
        conv_out = slim.convolution1d(conv_in,
                                      1,
                                      band_count,
                                      padding='SAME',
                                      normalizer_fn=None,
                                      normalizer_params=None,
                                      weights_regularizer=None,
                                      activation_fn=None)
    flattened = slim.flatten(conv_out)
    return tf.expand_dims(tf.expand_dims(flattened, axis=1), axis=1)
Ejemplo n.º 3
0
def _shadowdata_feature_discriminator_model(generated_data, is_training=True):
    """Feature discriminator: flatten, then three FC layers shrinking the width.

    Layer widths are band_size // 2, // 4, // 8, each with leaky-ReLU
    (alpha=0.1), variance-scaling init and L2(0.001) regularization.

    Args:
        generated_data: 4-D tensor [batch, 1, 1, band_size].
        is_training: unused (normalizers disabled); kept for API parity.

    Returns:
        2-D tensor [batch, band_size // 8] of features.
    """
    with slim.arg_scope(
        [slim.fully_connected, slim.separable_conv2d, slim.convolution1d],
            weights_initializer=initializers.variance_scaling(scale=2.0),
            weights_regularizer=slim.l2_regularizer(0.001),
            activation_fn=(lambda inp: slim.nn.leaky_relu(inp, alpha=0.1))):
        band_count = generated_data.get_shape()[3].value

        hidden = slim.flatten(generated_data)
        # Halve the width at every stage: band // 2 -> band // 4 -> band // 8.
        for divisor in (2, 4, 8):
            hidden = slim.fully_connected(hidden, band_count // divisor)
    return hidden
Ejemplo n.º 4
0
 def _build_graph_a(self, state_input, scope_name, train):
     """Build the actor graph: two 100-unit ReLU FC layers + softmax policy.

     Args:
         state_input: tensor of observed state features.
         scope_name: variable scope name (reused via tf.AUTO_REUSE).
         train: bool; whether created variables are trainable.

     Returns:
         Softmax tensor over self.action_dim actions.
     """
     # Joint local observation of the environment and the agent.
     with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE):
         with slim.arg_scope(
             [slim.conv2d, slim.fully_connected],
                 trainable=train,
                 weights_initializer=tf.truncated_normal_initializer(
                     stddev=0.1),
                 weights_regularizer=slim.l2_regularizer(0.05)):
             fc1 = slim.fully_connected(state_input,
                                        100,
                                        scope='full_connected1')
             relu1 = tf.nn.relu(fc1)
             fc2 = slim.fully_connected(relu1, 100, scope='full_connected2')
             relu2 = tf.nn.relu(fc2)
             # Final layer maps to one logit per action.
             fc3 = slim.fully_connected(relu2,
                                        self.action_dim,
                                        scope='full_connected3')
             output = tf.nn.softmax(fc3)
             return output
Ejemplo n.º 5
0
def _shadowdata_discriminator_model_simple(generated_data,
                                           generator_input,
                                           is_training=True):
    """Simple conditional discriminator: concat both inputs, one VALID 1-D conv.

    Args:
        generated_data: 4-D tensor [batch, 1, 1, band_size] from the generator.
        generator_input: 4-D tensor [batch, 1, 1, band_size] conditioning input.
        is_training: unused; kept for API parity.

    Returns:
        4-D tensor [batch, 1, 1, features] of discriminator outputs.
    """
    with slim.arg_scope(
        [slim.fully_connected, slim.separable_conv2d, slim.convolution1d],
            weights_initializer=initializers.variance_scaling(scale=2.0),
            activation_fn=(lambda inp: slim.nn.leaky_relu(inp, alpha=0.01))):
        band_count = generated_data.get_shape()[3].value
        doubled = band_count * 2

        # Condition the discriminator by stacking both inputs on the band axis.
        stacked = tf.concat(axis=3, values=[generated_data, generator_input])
        # [B, 1, 1, 2*band] -> [B, 2*band] -> [B, 2*band, 1] for conv1d.
        stacked = tf.expand_dims(tf.squeeze(stacked, axis=[1, 2]), axis=2)
        conv_out = slim.convolution1d(stacked,
                                      doubled,
                                      doubled,
                                      padding='VALID',
                                      normalizer_fn=None,
                                      normalizer_params=None,
                                      activation_fn=None)
        result = tf.expand_dims(tf.expand_dims(slim.flatten(conv_out), axis=1),
                                axis=1)
    return result
Ejemplo n.º 6
0
    def _build_graph_c(self, state_input, action, scope_name, train):
        """Build the critic graph: embed state and action, concat, score.

        Args:
            state_input: tensor of observed state features.
            action: tensor of the action taken.
            scope_name: variable scope name (reused via tf.AUTO_REUSE).
            train: bool; whether created variables are trainable.

        Returns:
            Tensor with a single scalar value estimate per example.
        """
        # Joint local observation of the environment and the agent.
        with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE):
            with slim.arg_scope(
                [slim.conv2d, slim.fully_connected],
                    trainable=train,
                    weights_initializer=tf.truncated_normal_initializer(
                        stddev=0.1),
                    weights_regularizer=slim.l2_regularizer(0.05)):
                state_embed = slim.fully_connected(state_input,
                                                   100,
                                                   scope='full_connected1_s')
                action_embed = slim.fully_connected(action,
                                                    100,
                                                    scope='full_connected1_a')
                joint = tf.concat([state_embed, action_embed], axis=1)

                hidden = slim.fully_connected(joint,
                                              100,
                                              scope='full_connected1')
                # One scalar value per example.
                return slim.fully_connected(hidden, 1, scope='full_connected2')
Ejemplo n.º 7
0
def _shadowdata_generator_model(netinput, is_training=True):
    """Generator built from a chain of 1-D convolutions with dense skip adds.

    Kernel sizes follow an hourglass pattern over the band axis
    (band, band//2, band//4, band//8, band//4, band//2, band); every stage
    adds the outputs of the previous one or two stages (see the `+` lines).

    Args:
        netinput: 4-D tensor [batch, 1, 1, band_size].
        is_training: whether created variables are trainable.

    Returns:
        4-D tensor [batch, 1, 1, band_size] of generated data.
    """
    with slim.arg_scope(
        [slim.conv2d, slim.conv2d_transpose, slim.convolution1d],
            # weights_initializer=initializers.variance_scaling(scale=2.0),
            weights_initializer=initializers.zeros(),
            # weights_regularizer=slim.l1_l2_regularizer(),
            # normalizer_fn=slim.batch_norm,
            # normalizer_params={'is_training': is_training, 'decay': 0.95},
            # normalizer_fn=slim.instance_norm,
            # normalizer_params={'center': True, 'scale': True, 'epsilon': 0.001},
            activation_fn=(lambda inp: slim.nn.leaky_relu(inp, alpha=0.1)),
            trainable=is_training,
            data_format="NHWC"):
        num_filters = 1
        band_size = netinput.get_shape()[3].value
        kernel_size = band_size

        # [B, 1, 1, band] -> [B, band, 1] so conv1d runs along the band axis.
        net0 = tf.expand_dims(tf.squeeze(netinput, axis=[1, 2]), axis=2)
        net1 = slim.convolution1d(net0,
                                  num_filters,
                                  kernel_size,
                                  padding='SAME')
        # First stage only has the input to skip from.
        net1 = net1 + net0

        net2 = slim.convolution1d(net1,
                                  num_filters,
                                  kernel_size // 2,
                                  padding='SAME')
        # From here on, each stage adds the two preceding activations.
        net2 = net2 + net1 + net0

        net3 = slim.convolution1d(net2,
                                  num_filters,
                                  kernel_size // 4,
                                  padding='SAME')
        net3 = net3 + net2 + net1

        net4 = slim.convolution1d(net3,
                                  num_filters,
                                  kernel_size // 8,
                                  padding='SAME')
        net4 = net4 + net3 + net2

        # Kernel sizes widen back out (mirror of the contracting half).
        net5 = slim.convolution1d(net4,
                                  num_filters,
                                  kernel_size // 4,
                                  padding='SAME')
        net5 = net5 + net4 + net3

        net6 = slim.convolution1d(net5,
                                  num_filters,
                                  kernel_size // 2,
                                  padding='SAME')
        net6 = net6 + net5 + net4

        # Output stage: linear (no activation/normalizer/regularizer), no skip.
        net7 = slim.convolution1d(net6,
                                  num_filters,
                                  kernel_size,
                                  padding='SAME',
                                  normalizer_fn=None,
                                  normalizer_params=None,
                                  weights_regularizer=None,
                                  activation_fn=None)
        flatten = slim.flatten(net7)
        # net9 = slim.fully_connected(flatten, band_size, activation_fn=None)
    return tf.expand_dims(tf.expand_dims(flatten, axis=1), axis=1)
Ejemplo n.º 8
0
    def create_Q_network(self, name):  # Build the Q network (original note said "vgg16"; the structure below is residual/ResNet-style)
        """Build the Q network and store `self.state_input`, `self.logits`,
        `self.Q_value` as side effects.

        Args:
            name: variable scope name (reused via tf.AUTO_REUSE).
        """
        self.state_input = tf.placeholder("float",
                                          shape=self.state_dim,
                                          name='state_input')
        with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
            with slim.arg_scope(
                [slim.conv2d],
                    activation_fn=tf.nn.relu,
                    padding='SAME',
                    weights_initializer=tf.truncated_normal_initializer(
                        self.mu, self.sigma),
                    weights_regularizer=slim.l2_regularizer(0.0005),
            ):
                # 112 * 112 * 64
                net = slim.conv2d(self.state_input,
                                  64, [7, 7],
                                  stride=2,
                                  scope='conv1')

                # 56 * 56 * 64
                net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')

                temp = net

                # First residual block
                net = slim.conv2d(net, 64, [3, 3], scope='conv2_1_1')
                net = slim.conv2d(net, 64, [3, 3], scope='conv2_1_2')
                # Residual addition
                net = tf.nn.relu(tf.add(temp, net))

                temp = net
                # Residual block
                net = slim.conv2d(net, 64, [3, 3], scope='conv2_2_1')
                net = slim.conv2d(net, 64, [3, 3], scope='conv2_2_2')
                # Residual addition
                net = tf.nn.relu(tf.add(temp, net))

                temp = net
                # 28 * 28 * 128 — 1x1 stride-2 projection to match new shape
                temp = slim.conv2d(temp, 128, [1, 1], stride=2, scope='r1')

                # Second residual block (downsampling)
                net = slim.conv2d(net,
                                  128, [3, 3],
                                  stride=2,
                                  scope='conv3_1_1')
                net = slim.conv2d(net, 128, [3, 3], scope='conv3_1_2')
                # Residual addition
                net = tf.nn.relu(tf.add(temp, net))

                temp = net
                # Residual block
                net = slim.conv2d(net, 128, [3, 3], scope='conv3_2_1')
                net = slim.conv2d(net, 128, [3, 3], scope='conv3_2_2')
                # Residual addition
                net = tf.nn.relu(tf.add(temp, net))

                temp = net
                # 14 * 14 * 256 — 1x1 stride-2 projection shortcut
                temp = slim.conv2d(temp, 256, [1, 1], stride=2, scope='r2')

                # Third residual block (downsampling)
                net = slim.conv2d(net,
                                  256, [3, 3],
                                  stride=2,
                                  scope='conv4_1_1')
                net = slim.conv2d(net, 256, [3, 3], scope='conv4_1_2')
                # Residual addition
                net = tf.nn.relu(tf.add(temp, net))

                temp = net
                # Residual block
                net = slim.conv2d(net, 256, [3, 3], scope='conv4_2_1')
                net = slim.conv2d(net, 256, [3, 3], scope='conv4_2_2')
                # Residual addition
                net = tf.nn.relu(tf.add(temp, net))

                temp = net
                # 7 * 7 * 512 — 1x1 stride-2 projection shortcut
                temp = slim.conv2d(temp, 512, [1, 1], stride=2, scope='r3')

                # Fourth residual block (downsampling)
                net = slim.conv2d(net,
                                  512, [3, 3],
                                  stride=2,
                                  scope='conv5_1_1')
                net = slim.conv2d(net, 512, [3, 3], scope='conv5_1_2')
                # Residual addition
                net = tf.nn.relu(tf.add(temp, net))

                temp = net
                # Residual block
                net = slim.conv2d(net, 512, [3, 3], scope='conv5_2_1')
                net = slim.conv2d(net, 512, [3, 3], scope='conv5_2_2')
                # Residual addition
                net = tf.nn.relu(tf.add(temp, net))

                net = slim.avg_pool2d(net, [4, 4], stride=1, scope='pool2')

                net = slim.flatten(net, scope='flatten')
                fc1 = slim.fully_connected(net, 1000, scope='fc1')

                # Head outputs action_dim + parameterdim raw logits.
                self.logits = slim.fully_connected(fc1,
                                                   self.action_dim +
                                                   self.parameterdim,
                                                   activation_fn=None,
                                                   scope='fc2')
                # NOTE(review): softmax over Q-values is unusual for DQN —
                # presumably intentional here; confirm against the training loop.
                self.Q_value = tf.nn.softmax(self.logits)