Example #1
0
    def __call__(self, input):
        """Decode `input` through four stacked transposed convolutions.

        Each layer uses kernel_size=4, stride=2, so the spatial resolution
        doubles at every step (4 layers -> 16x upsampling overall).

        Args:
            input: feature-map tensor (NHWC assumed -- TODO confirm with caller).

        Returns:
            The output tensor of the last transposed-convolution layer.
        """
        # Fix: tf.varible_scope -> tf.variable_scope (typo raised
        # AttributeError the first time the decoder was called).
        with tf.variable_scope("decoder"):
            h1 = tf.contrib.layers.conv2d_transpose(input,
                                                    num_outputs=64,
                                                    kernel_size=4,
                                                    stride=2)
            h2 = tf.contrib.layers.conv2d_transpose(h1,
                                                    num_outputs=64,
                                                    kernel_size=4,
                                                    stride=2)
            h3 = tf.contrib.layers.conv2d_transpose(h2,
                                                    num_outputs=32,
                                                    kernel_size=4,
                                                    stride=2)
            h4 = tf.contrib.layers.conv2d_transpose(h3,
                                                    num_outputs=32,
                                                    kernel_size=4,
                                                    stride=2)

            return h4
Example #2
0
    def __init__(self, learning_rate=0.01, scope="policy_estimator"):
        """Build the policy-estimator graph (REINFORCE policy gradient).

        Args:
            learning_rate: Adam optimizer learning rate.
            scope: variable scope under which all ops/variables are created.

        Fixes vs. the original:
          * tf.varible_scope -> tf.variable_scope, and the `scope` argument
            is now actually passed (it was ignored, and variable_scope()
            requires at least one argument, so the original crashed).
          * target is float32: it multiplies the float log-probability, so
            an int32 placeholder would fail the matmul/mul type check.
          * fullt_connected -> fully_connected, tf.zeros.initializer ->
            tf.zeros_initializer() (both raised AttributeError).
          * the whole graph now lives inside the scope, not just the
            placeholders.
        """
        with tf.variable_scope(scope):
            self.state = tf.placeholder(tf.int32, [], "state")
            self.action = tf.placeholder(tf.int32, [], "action")
            self.target = tf.placeholder(tf.float32, [], "target")

            # One-hot state lookup feeding a single linear layer
            # (table-lookup policy; env is a module-level Gym environment).
            state_one_hot = tf.one_hot(self.state, int(env.observation_space.n))
            self.output_layer = tf.contrib.layers.fully_connected(
                inputs=tf.expand_dims(state_one_hot, 0),
                num_outputs=env.action_space.n,
                activation_fn=None,
                weights_initializer=tf.zeros_initializer())

            self.action_probs = tf.squeeze(tf.nn.softmax(self.output_layer))
            # NOTE(review): attribute kept as picked_Action_prob (not
            # snake_case) so existing callers are unaffected.
            self.picked_Action_prob = tf.gather(self.action_probs, self.action)

            # REINFORCE loss: -log(pi(a|s)) * return.
            self.loss = -tf.log(self.picked_Action_prob) * self.target
            self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
            self.train_op = self.optimizer.minimize(
                self.loss, global_step=tf.contrib.framework.get_global_step())
Example #3
0
def inception_v3_base(inputs, scope=None):
    """Build the Inception V3 convolutional trunk.

    Args:
        inputs: image batch tensor, NHWC, expected 299x299x3.
        scope: optional variable-scope name (defaults to 'InceptionV3').

    Returns:
        (net, end_points): the final 8x8x2048 feature map and a dict of
        saved intermediate activations (currently only 'Mixed_6e').

    Fixes vs. the original:
      * first conv consumed the builtin `input` instead of `inputs`.
      * padding='SAVE' -> 'SAME' (invalid padding string).
      * tf.varible_scope -> tf.variable_scope throughout.
      * duplicate scope names de-duplicated ('Branch_3' used twice in
        Mixed_5b; repeated 'COnv2d_1c_1*7' / 'Conv2d_1a_1*1' layer scopes
        would raise "variable already exists" errors).
      * the Inception-module section is nested inside the outer variable
        scope, as intended (it had been dedented out of it).
    """
    end_points = {}  # collects key activations for later reuse
    with tf.variable_scope(scope, 'InceptionV3', [inputs]):
        # Stem: 299x299x3 -> 35x35x192.
        with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                            stride=1,
                            padding='VALID'):
            net = slim.conv2d(
                inputs, 32, [3, 3], stride=2,
                scope='Conv2d_1a_3*3')  # (299-3)/2+1=149 -> 149*149*32
            net = slim.conv2d(
                net, 32, [3, 3],
                scope='Conv2d_2a_3*3')  # (149-3)/1+1=147 -> 147*147*32
            net = slim.conv2d(net,
                              64, [3, 3],
                              padding='SAME',
                              scope='Conv2d_3a_3*3')  # 147*147*64
            net = slim.max_pool2d(
                net, [3, 3], stride=2,
                scope='MaxPool_3a_3*3')  # (147-3)/2+1=73 -> 73*73*64
            net = slim.conv2d(net, 80, [3, 3],
                              scope='Conv2d_3b_3*3')  # (73-3)/1+1=71 -> 71*71*80
            net = slim.conv2d(net,
                              192, [3, 3],
                              stride=2,
                              scope='Conv2d_4a_3*3')  # (71-3)/2+1=35 -> 35*35*192
            net = slim.max_pool2d(net, [3, 3],
                                  padding='SAME',
                                  scope='MaxPool_5a_3*3')  # 35*35*192

        with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                            stride=1,
                            padding='SAME'):
            # First Inception block group (35x35 grid).
            with tf.variable_scope('Mixed_5b'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 64, [1, 1],
                                           scope='Conv2d_0a_1*1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 48, [1, 1],
                                           scope='Conv2d_0a_1*1')
                    branch_1 = slim.conv2d(branch_1,
                                           64, [5, 5],
                                           scope='Conv2d_0b_5*5')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, 64, [1, 1],
                                           scope='Conv2d_0a_1*1')
                    branch_2 = slim.conv2d(branch_2,
                                           96, [3, 3],
                                           scope='Conv2d_0b_3*3')
                    branch_2 = slim.conv2d(branch_2,
                                           96, [3, 3],
                                           scope='Conv2d_0c_3*3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3],
                                               scope='AvgPool_0a_3*3')
                    branch_3 = slim.conv2d(branch_3,
                                           32, [1, 1],
                                           scope='Conv2d_0b_1*1')
                # Concatenate branches along the channel axis.
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)

            with tf.variable_scope('Mixed_5c'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 64, [1, 1],
                                           scope='Conv2d_0a_1*1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 48, [1, 1],
                                           scope='Conv2d_0a_1*1')
                    branch_1 = slim.conv2d(branch_1,
                                           64, [5, 5],
                                           scope='Conv2d_0b_5*5')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, 64, [1, 1],
                                           scope='Conv2d_0a_1*1')
                    branch_2 = slim.conv2d(branch_2,
                                           96, [3, 3],
                                           scope='Conv2d_0b_3*3')
                    branch_2 = slim.conv2d(branch_2,
                                           96, [3, 3],
                                           scope='Conv2d_0c_3*3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3],
                                               scope='AvgPool_0a_3*3')
                    branch_3 = slim.conv2d(branch_3,
                                           64, [1, 1],
                                           scope='Conv2d_0b_1*1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)

            with tf.variable_scope('Mixed_5d'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 64, [1, 1],
                                           scope='Conv2d_0a_1*1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 48, [1, 1],
                                           scope='Conv2d_0a_1*1')
                    branch_1 = slim.conv2d(branch_1,
                                           64, [5, 5],
                                           scope='Conv2d_0b_5*5')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, 64, [1, 1],
                                           scope='Conv2d_0a_1*1')
                    branch_2 = slim.conv2d(branch_2,
                                           96, [3, 3],
                                           scope='Conv2d_0b_3*3')
                    branch_2 = slim.conv2d(branch_2,
                                           96, [3, 3],
                                           scope='Conv2d_0c_3*3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3],
                                               scope='AvgPool_0a_3*3')
                    branch_3 = slim.conv2d(branch_3,
                                           64, [1, 1],
                                           scope='Conv2d_0b_1*1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)

            # Second Inception block group (17x17 grid).
            with tf.variable_scope('Mixed_6a'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net,
                                           384, [3, 3],
                                           stride=2,
                                           padding='VALID',
                                           scope='Conv2d_1a_3*3')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 64, [1, 1],
                                           scope='Conv2d_1a_1*1')
                    branch_1 = slim.conv2d(branch_1,
                                           96, [3, 3],
                                           scope='Conv2d_1b_3*3')
                    branch_1 = slim.conv2d(branch_1,
                                           96, [3, 3],
                                           stride=2,
                                           padding='VALID',
                                           scope='Conv2d_1c_3*3')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.max_pool2d(net, [3, 3],
                                               stride=2,
                                               padding='VALID',
                                               scope='MaxPool_1a_3*3')
                net = tf.concat([branch_0, branch_1, branch_2], 3)

            with tf.variable_scope('Mixed_6b'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 192, [1, 1],
                                           scope='Conv2d_1a_1*1')
                with tf.variable_scope('Branch_1'):
                    # Factorized 7x7 convolution: 1x7 followed by 7x1.
                    branch_1 = slim.conv2d(net, 128, [1, 1],
                                           scope='Conv2d_1a_1*1')
                    branch_1 = slim.conv2d(branch_1,
                                           128, [1, 7],
                                           scope='Conv2d_1b_1*7')
                    branch_1 = slim.conv2d(branch_1,
                                           192, [7, 1],
                                           scope='Conv2d_1c_7*1')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, 128, [1, 1],
                                           scope='Conv2d_1a_1*1')
                    branch_2 = slim.conv2d(branch_2,
                                           128, [7, 1],
                                           scope='Conv2d_1b_7*1')
                    branch_2 = slim.conv2d(branch_2,
                                           128, [1, 7],
                                           scope='Conv2d_1c_1*7')
                    branch_2 = slim.conv2d(branch_2,
                                           128, [7, 1],
                                           scope='Conv2d_1d_7*1')
                    branch_2 = slim.conv2d(branch_2,
                                           192, [1, 7],
                                           scope='Conv2d_1e_1*7')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3],
                                               scope='AvgPool_1a_3*3')
                    branch_3 = slim.conv2d(branch_3,
                                           192, [1, 1],
                                           scope='Conv2d_1b_1*1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)

            with tf.variable_scope('Mixed_6c'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 192, [1, 1],
                                           scope='Conv2d_1a_1*1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 160, [1, 1],
                                           scope='Conv2d_1a_1*1')
                    branch_1 = slim.conv2d(branch_1,
                                           160, [1, 7],
                                           scope='Conv2d_1b_1*7')
                    branch_1 = slim.conv2d(branch_1,
                                           192, [7, 1],
                                           scope='Conv2d_1c_7*1')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, 160, [1, 1],
                                           scope='Conv2d_1a_1*1')
                    branch_2 = slim.conv2d(branch_2,
                                           160, [7, 1],
                                           scope='Conv2d_1b_7*1')
                    branch_2 = slim.conv2d(branch_2,
                                           160, [1, 7],
                                           scope='Conv2d_1c_1*7')
                    branch_2 = slim.conv2d(branch_2,
                                           160, [7, 1],
                                           scope='Conv2d_1d_7*1')
                    branch_2 = slim.conv2d(branch_2,
                                           192, [1, 7],
                                           scope='Conv2d_1e_1*7')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3],
                                               scope='AvgPool_1a_3*3')
                    branch_3 = slim.conv2d(branch_3,
                                           192, [1, 1],
                                           scope='Conv2d_1b_1*1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)

            with tf.variable_scope('Mixed_6d'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 192, [1, 1],
                                           scope='Conv2d_1a_1*1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 160, [1, 1],
                                           scope='Conv2d_1a_1*1')
                    branch_1 = slim.conv2d(branch_1,
                                           160, [1, 7],
                                           scope='Conv2d_1b_1*7')
                    branch_1 = slim.conv2d(branch_1,
                                           192, [7, 1],
                                           scope='Conv2d_1c_7*1')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, 160, [1, 1],
                                           scope='Conv2d_1a_1*1')
                    branch_2 = slim.conv2d(branch_2,
                                           160, [7, 1],
                                           scope='Conv2d_1b_7*1')
                    branch_2 = slim.conv2d(branch_2,
                                           160, [1, 7],
                                           scope='Conv2d_1c_1*7')
                    branch_2 = slim.conv2d(branch_2,
                                           160, [7, 1],
                                           scope='Conv2d_1d_7*1')
                    branch_2 = slim.conv2d(branch_2,
                                           192, [1, 7],
                                           scope='Conv2d_1e_1*7')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3],
                                               scope='AvgPool_1a_3*3')
                    branch_3 = slim.conv2d(branch_3,
                                           192, [1, 1],
                                           scope='Conv2d_1b_1*1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)

            with tf.variable_scope('Mixed_6e'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 192, [1, 1],
                                           scope='Conv2d_1a_1*1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 160, [1, 1],
                                           scope='Conv2d_1a_1*1')
                    branch_1 = slim.conv2d(branch_1,
                                           160, [1, 7],
                                           scope='Conv2d_1b_1*7')
                    branch_1 = slim.conv2d(branch_1,
                                           192, [7, 1],
                                           scope='Conv2d_1c_7*1')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, 160, [1, 1],
                                           scope='Conv2d_1a_1*1')
                    branch_2 = slim.conv2d(branch_2,
                                           160, [7, 1],
                                           scope='Conv2d_1b_7*1')
                    branch_2 = slim.conv2d(branch_2,
                                           160, [1, 7],
                                           scope='Conv2d_1c_1*7')
                    branch_2 = slim.conv2d(branch_2,
                                           160, [7, 1],
                                           scope='Conv2d_1d_7*1')
                    branch_2 = slim.conv2d(branch_2,
                                           192, [1, 7],
                                           scope='Conv2d_1e_1*7')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3],
                                               scope='AvgPool_1a_3*3')
                    branch_3 = slim.conv2d(branch_3,
                                           192, [1, 1],
                                           scope='Conv2d_1b_1*1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)

                # Save this activation (used e.g. as an auxiliary head input).
                end_points['Mixed_6e'] = net

            # Third Inception block group (8x8 grid).
            with tf.variable_scope('Mixed_7a'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 192, [1, 1],
                                           scope='Conv2d_1a_1*1')
                    branch_0 = slim.conv2d(branch_0,
                                           320, [3, 3],
                                           stride=2,
                                           padding='VALID',
                                           scope='Conv2d_1b_3*3')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 192, [1, 1],
                                           scope='Conv2d_1a_1*1')
                    branch_1 = slim.conv2d(branch_1,
                                           192, [1, 7],
                                           scope='Conv2d_1b_1*7')
                    branch_1 = slim.conv2d(branch_1,
                                           192, [7, 1],
                                           scope='Conv2d_1c_7*1')
                    branch_1 = slim.conv2d(branch_1,
                                           192, [3, 3],
                                           stride=2,
                                           padding='VALID',
                                           scope='Conv2d_1d_3*3')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.max_pool2d(net, [3, 3],
                                               stride=2,
                                               padding='VALID',
                                               scope='MaxPool_1a_3*3')
                net = tf.concat([branch_0, branch_1, branch_2], 3)

            with tf.variable_scope('Mixed_7b'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 320, [1, 1],
                                           scope='Conv2d_1a_1*1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 384, [1, 1],
                                           scope='Conv2d_1a_1*1')
                    # Split into parallel 1x3 and 3x1 convs, then concat.
                    branch_1 = tf.concat([
                        slim.conv2d(branch_1, 384, [1, 3],
                                    scope='Conv2d_1b_1*3'),
                        slim.conv2d(branch_1, 384, [3, 1],
                                    scope='Conv2d_1b_3*1')
                    ], 3)
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, 448, [1, 1],
                                           scope='Conv2d_1a_1*1')
                    branch_2 = slim.conv2d(branch_2,
                                           384, [3, 3],
                                           scope='Conv2d_1b_3*3')
                    branch_2 = tf.concat([
                        slim.conv2d(branch_2, 384, [1, 3],
                                    scope='Conv2d_1c_1*3'),
                        slim.conv2d(branch_2, 384, [3, 1],
                                    scope='Conv2d_1c_3*1')
                    ], 3)
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3],
                                               scope='AvgPool_1a_3*3')
                    branch_3 = slim.conv2d(branch_3,
                                           192, [1, 1],
                                           scope='Conv2d_1b_1*1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)

            with tf.variable_scope('Mixed_7c'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 320, [1, 1],
                                           scope='Conv2d_1a_1*1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 384, [1, 1],
                                           scope='Conv2d_1a_1*1')
                    branch_1 = tf.concat([
                        slim.conv2d(branch_1, 384, [1, 3],
                                    scope='Conv2d_1b_1*3'),
                        slim.conv2d(branch_1, 384, [3, 1],
                                    scope='Conv2d_1b_3*1')
                    ], 3)
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, 448, [1, 1],
                                           scope='Conv2d_1a_1*1')
                    branch_2 = slim.conv2d(branch_2,
                                           384, [3, 3],
                                           scope='Conv2d_1b_3*3')
                    branch_2 = tf.concat([
                        slim.conv2d(branch_2, 384, [1, 3],
                                    scope='Conv2d_1c_1*3'),
                        slim.conv2d(branch_2, 384, [3, 1],
                                    scope='Conv2d_1c_3*1')
                    ], 3)
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3],
                                               scope='AvgPool_1a_3*3')
                    branch_3 = slim.conv2d(branch_3,
                                           192, [1, 1],
                                           scope='Conv2d_1b_1*1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)

            return net, end_points
Example #4
0
def generator(noise, labels):
    """Conditional-GAN generator: two dense layers over [noise, labels].

    Args:
        noise: latent noise tensor, shape (batch, noise_dim).
        labels: conditioning label tensor, concatenated with the noise.

    Returns:
        Sigmoid-activated output tensor of width `n_input`
        (n_hidden / n_input are module-level constants).
    """
    # Fixes: tf.varible_scope -> tf.variable_scope (AttributeError) and
    # lables -> labels (NameError: the parameter is spelled `labels`).
    with tf.variable_scope('generator'):
        inputs = tf.concat([noise, labels], 1)
        hidden = tf.layers.dense(inputs, n_hidden, activation=tf.nn.relu)
        output = tf.layers.dense(hidden, n_input, activation=tf.nn.sigmoid)
    return output
Example #5
0
    def __call__(self, input):
        """Identity decoder: returns `input` unchanged (placeholder impl)."""
        # Fix: tf.varible_scope -> tf.variable_scope (typo raised
        # AttributeError on every call).
        with tf.variable_scope("decoder"):
            out = input

            return out