Code example #1
File: network.py Project: BYU-PCCL/Jedi
    def actor(self, state):
        with tf.variable_scope('actor'):
            fc1, w1, b1 = op.linear(op.flatten(state), 400, name='fc1', stddev=0.001, bias_start=0.001)
            fc2, w2, b2 = op.linear(fc1, 300, name='fc2', stddev=0.001, bias_start=0.001)
            action, w3, b3 = op.linear(fc2, self.environment.get_num_actions(), name='actions', activation_fn='tanh', stddev=0.001, bias_start=0.001)

            # tanh bounds the action to [-1, 1]; doubling maps it onto the
            # environment's action range, here assumed to be [-2, 2]
            return action * 2
Code example #2
File: agent.py Project: 23156145525/icodoom
    def make_net(self, input_images, input_measurements, input_actions, reuse=False):
        if reuse:
            tf.get_variable_scope().reuse_variables()
        
        self.fc_val_params = np.copy(self.fc_joint_params)
        self.fc_val_params['out_dims'][-1] = self.target_dim
        self.fc_adv_params = np.copy(self.fc_joint_params)
        self.fc_adv_params['out_dims'][-1] = len(self.net_discrete_actions) * self.target_dim
        print(len(self.net_discrete_actions) * self.target_dim)
        p_img_conv = my_ops.conv_encoder(input_images, self.conv_params, 'p_img_conv', msra_coeff=0.9)
        print ("Conv Params: ", self.conv_params)

        p_img_fc = my_ops.fc_net(my_ops.flatten(p_img_conv), self.fc_img_params, 'p_img_fc', msra_coeff=0.9)
        print ("img_params", self.fc_img_params)
        p_meas_fc = my_ops.fc_net(input_measurements, self.fc_meas_params, 'p_meas_fc', msra_coeff=0.9)
        print ("meas_params", self.fc_meas_params)
        p_val_fc = my_ops.fc_net(tf.concat([p_img_fc, p_meas_fc], axis=1), self.fc_val_params, 'p_val_fc', last_linear=True, msra_coeff=0.9)
        print ("val_params", self.fc_val_params)
        p_adv_fc = my_ops.fc_net(tf.concat([p_img_fc, p_meas_fc], axis=1), self.fc_adv_params, 'p_adv_fc', last_linear=True, msra_coeff=0.9)
        print ("adv_params", self.fc_adv_params)

        p_adv_fc_nomean = p_adv_fc - tf.reduce_mean(p_adv_fc, reduction_indices=1, keep_dims=True)  
        
        self.pred_all_nomean = tf.reshape(p_adv_fc_nomean, [-1, len(self.net_discrete_actions), self.target_dim])
        self.pred_all = self.pred_all_nomean + tf.reshape(p_val_fc, [-1, 1, self.target_dim])
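        # keep only the prediction slice for the action actually taken:
        # input_actions is presumably one-hot over actions, so casting it to
        # bool masks out every other action's predictions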
        self.pred_relevant = tf.boolean_mask(self.pred_all, tf.cast(input_actions, tf.bool))
        print ("make_net: input_actions: ", input_actions)
        print ("make_net: pred_all: ", self.pred_all)
        print ("make_net: pred_relevant: ", self.pred_relevant)
Code example #3
File: architecture.py Project: dhruvramani/GANs
    def discriminator(self, inputs, scope='discriminator', reuse=None):
        with tf.variable_scope(scope, reuse=reuse):

            # Set the discriminator to always be in training mode. The reason:
            # the WGAN gradient-penalty loss (not the default loss function for
            # this model, but it uses this same architecture) contains a term
            # that is the gradient of an instance of the discriminator. Feeding
            # that into the optimizer creates a dependency on the second-order
            # gradient of the discriminator. Batch normalization where the
            # training-vs-inference flag is itself a TF variable (rather than a
            # plain Python boolean) seems to break this, so it is easier to set
            # it to True: in this model the discriminator is only ever used for
            # training (to train the generator).
            bn = BN(True)

            t = lrelu(conv2d(inputs, 64))  # no bn here
            t = lrelu(bn(conv2d(t, 128)))
            t = lrelu(bn(conv2d(t, 256)))
            t = lrelu(bn(conv2d(t, 512)))
            t = lrelu(bn(conv2d(t, 1024)))

            # flatten the 3D tensor into 1D to prepare for a dense (fully
            # connected) layer. The flattened tensor can also be treated as a
            # vector for learned similarity measurements between images.
            similarity = flatten(t)

            # return logits (before the sigmoid activation) because several TF
            # loss functions expect logits and apply the sigmoid internally
            logits = dense(similarity, 1)
            classification = sigmoid(logits)
            return classification, logits, similarity
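For context on the batch-norm comment above, here is a minimal sketch of the WGAN gradient-penalty term it refers to; the tensors real and fake and the penalty weight 10.0 are illustrative assumptions, not taken from this repository:

    # interpolate between real and generated samples
    eps = tf.random_uniform([tf.shape(real)[0], 1, 1, 1], 0.0, 1.0)
    interp = eps * real + (1.0 - eps) * fake
    _, interp_logits, _ = self.discriminator(interp, reuse=True)
    # gradient of the discriminator output w.r.t. its input
    grads = tf.gradients(interp_logits, [interp])[0]
    norm = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]) + 1e-12)
    gradient_penalty = 10.0 * tf.reduce_mean(tf.square(norm - 1.0))
    # adding gradient_penalty to the loss makes the optimizer differentiate
    # through tf.gradients, i.e. it needs second-order gradients of the
    # discriminator -- exactly the dependency the comment describes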
Code example #4
    def resnet_with_bottleneck(self, input, is_training, layer_from_2=[3, 4, 6, 3],
                               first_kernel=7, first_stride=2, first_pool=True, stride=2):

        input_shape = input.get_shape().as_list()[1:]
        conv = ops.conv2d(input, 'initial_conv',
                          [first_kernel, first_kernel, input_shape[2], 64],
                          [1, first_stride, first_stride, 1])
        if first_pool:
            conv = ops.max_pool(conv, [1, 3, 3, 1], [1, 2, 2, 1])

        for i in range(layer_from_2[0]):
            conv = ops.residual_bottleneck_block(conv, 'Block_1_' + str(i), is_training, 256,
                                                 kernel=3, first_block=True, stride=stride)

        for i in range(layer_from_2[1]):
            conv = ops.residual_bottleneck_block(conv, 'Block_2_' + str(i), is_training, 512,
                                                 kernel=3, first_block=True, stride=stride)

        for i in range(layer_from_2[2]):
            conv = ops.residual_bottleneck_block(conv, 'Block_3_' + str(i), is_training, 1024,
                                                 kernel=3, first_block=True, stride=stride)

        for i in range(layer_from_2[3]):
            conv = ops.residual_bottleneck_block(conv, 'Block_4_' + str(i), is_training, 2048,
                                                 kernel=3, first_block=True, stride=stride)
        with tf.variable_scope('unit'):
            conv = ops.batch_normalization(conv, is_training)
            conv = tf.nn.relu(conv)
            conv = ops.global_avg_pool(conv)
            conv = ops.flatten(conv)
        with tf.variable_scope('logit'):
            conv = ops.get_hidden_layer(conv, 'output', self.no_of_classes, 'none')
        return conv
Code example #5
    def vgg16(self, x, is_training):
        x_shape = x.get_shape().as_list()[1:]
        kernel = {
            'c1_1': [3, 3, x_shape[2], 64], 'c1_2': [3, 3, 64, 64],
            'c2_1': [3, 3, 64, 128], 'c2_2': [3, 3, 128, 128],
            'c3_1': [3, 3, 128, 256], 'c3_2': [3, 3, 256, 256],
            'c3_3': [3, 3, 256, 256],
            'c4_1': [3, 3, 256, 512], 'c4_2': [3, 3, 512, 512],
            'c4_3': [3, 3, 512, 512],
            'c5_1': [3, 3, 512, 512], 'c5_2': [3, 3, 512, 512],
            'c5_3': [3, 3, 512, 512]}
        strides = {'c': [1, 1, 1, 1], 'p': [1, 2, 2, 1]}
        pool_win_size = [1, 2, 2, 1]
        conv = x

        with tf.variable_scope('Conv_1') as scope:
            conv = ops.conv2d(conv,'Conv_1_1', kernel['c1_1'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.conv2d(conv,'Conv_1_2', kernel['c1_2'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.max_pool(conv, pool_win_size, strides['p'])
        with tf.variable_scope('Conv_2') as scope:
            conv = ops.conv2d(conv,'Conv_2_1', kernel['c2_1'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.conv2d(conv,'Conv_2_2', kernel['c2_2'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.max_pool(conv, pool_win_size, strides['p'])
        with tf.variable_scope('Conv_3') as scope:
            conv = ops.conv2d(conv,'Conv_3_1', kernel['c3_1'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.conv2d(conv,'Conv_3_2', kernel['c3_2'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.conv2d(conv,'Conv_3_3', kernel['c3_3'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.max_pool(conv, pool_win_size, strides['p'])
        with tf.variable_scope('Conv_4') as scope:
            conv = ops.conv2d(conv,'Conv_4_1', kernel['c4_1'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.conv2d(conv,'Conv_4_2', kernel['c4_2'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.conv2d(conv,'Conv_4_3', kernel['c4_3'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.max_pool(conv, pool_win_size, strides['p'])
        with tf.variable_scope('Conv_5') as scope:
            conv = ops.conv2d(conv,'Conv_5_1', kernel['c5_1'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.conv2d(conv,'Conv_5_2', kernel['c5_2'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.conv2d(conv,'Conv_5_3', kernel['c5_3'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.max_pool(conv, pool_win_size, strides['p'])
        with tf.variable_scope('Flatten_layer') as scope:
            conv = ops.flatten(conv)
        with tf.variable_scope('Hidden_layer_1') as scope:
            conv = ops.get_hidden_layer(conv,'Hidden_layer_1', 4096, activation='relu', initializer='xavier')
        with tf.variable_scope('Hidden_layer_2') as scope:
            conv = ops.get_hidden_layer(conv,'Hidden_layer_2', 4096, activation='relu', initializer='xavier')
        with tf.variable_scope('Output_layer') as scope:
            conv = ops.get_hidden_layer(conv,'output_layer', self.no_of_classes, activation="none", initializer='xavier')
        return conv
Code example #6
    def alexnet(self, x, is_training):
        x_shape = x.get_shape().as_list()[1:]
        kernel = {'c1': [11, 11, x_shape[2], 96], 'c2': [5, 5, 96, 256],
                  'c3': [3, 3, 256, 384], 'c4': [3, 3, 384, 384],
                  'c5': [3, 3, 384, 256]}
        strides = {'1': [1, 1, 1, 1], '2': [1, 2, 2, 1], '3': [1, 3, 3, 1], '4': [1, 4, 4, 1]}
        pool_win_size = {'1': [1, 1, 1, 1], '2': [1, 2, 2, 1], '3': [1, 3, 3, 1], '4': [1, 4, 4, 1]}

        with tf.variable_scope('Conv_1') as scope:
            conv = ops.conv2d(x,'conv_1', kernel['c1'], strides['4'], 'VALID')
            conv = tf.nn.lrn(conv, depth_radius=2, bias=1.0, alpha=1e-05, beta=0.75)
            conv = ops.max_pool(conv, pool_win_size['3'], strides['2'], "VALID")
        with tf.variable_scope('Conv_2') as scope:
            conv = ops.conv2d(conv,'conv_2', kernel['c2'], strides['1'], padding='SAME', groups=2)
            conv = tf.nn.lrn(conv, depth_radius=2, bias=1.0, alpha=1e-05, beta=0.75)
            conv = ops.max_pool(conv, pool_win_size['3'], strides['2'], 'VALID')
        with tf.variable_scope('Conv_3') as scope:
            conv = ops.conv2d(conv,'conv_3', kernel['c3'], strides['1'], 'SAME')
        with tf.variable_scope('Conv_4') as scope:
            conv = ops.conv2d(conv,'conv_4', kernel['c4'], strides['1'], 'SAME', groups=2)
        with tf.variable_scope('Conv_5') as scope:
            conv = ops.conv2d(conv,'conv_5', kernel['c5'], strides['1'], 'SAME', groups=2)
            conv = ops.max_pool(conv, pool_win_size['3'], strides['2'], 'VALID')
        with tf.variable_scope('Flatten_layer') as scope:
            conv=ops.flatten(conv)
        with tf.variable_scope('Hidden_layer_1') as scope:
            conv = ops.get_hidden_layer(conv,'Hidden_layer_1', 4096, activation=['relu', 'dropout'], initializer='xavier')
        with tf.variable_scope('Hidden_layer_2') as scope:
            conv = ops.get_hidden_layer(conv,'Hidden_layer_2', 4096, activation=['relu', 'dropout'], initializer='xavier')
        with tf.variable_scope('Output_layer') as scope:
            conv = ops.get_hidden_layer(conv,'output_layer',self.no_of_classes, activation='none', initializer='xavier')
        return conv
Code example #7
    def aux_classifier(self, inputs, labels, input_channels, is_training, scope=None):
        """
            Auxiliary Classifier used in Inception Module to help propagate
            gradients backward.
        """
        with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
            # pooling layer with 5x5 kernel and stride 3 (new size: 4x4xC)
            network = tf.nn.avg_pool(inputs, 5, 3, 'VALID', name='pool')

            # convolution with 1x1 kernel and stride 1 (new size: 4x4x128)
            network = ops.convolution(network, input_channels, 128, 1, 128, batch_norm=False,
                                      is_training=is_training, scope='auxconv')

            # flatten (new size: 2048)
            network = ops.flatten(network, scope='flatten')

            # fully connected layer (new size: 1024)
            network = ops.dense(network, 2048, 1024, dropout=True, dropout_rate=0.7,
                                is_training=is_training, scope='fc1')

            # output layer (new size: 10) -- Original Paper Size: 1000 (for ImageNet)
            network = ops.dense(network, 1024, 10, activation=None, is_training=is_training,
                                scope='fc2')

            # loss of auxiliary classifier
            loss = ops.loss(network, labels, scope='auxloss')

            return loss
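A hedged sketch of how an auxiliary loss like this is typically combined with the main objective during training; the 0.3 weight follows the original GoogLeNet paper, and mid_features, main_loss, and input_channels=128 are illustrative names and values, not taken from this code:

    # hypothetical wiring: down-weight the auxiliary loss and add it to the
    # primary classification loss (only while training)
    aux_loss = self.aux_classifier(mid_features, labels, input_channels=128,
                                   is_training=True, scope='aux1')
    total_loss = main_loss + 0.3 * aux_loss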
Code example #8
File: network.py Project: BYU-PCCL/Jedi
    def critic(self, state, action):
        with tf.variable_scope('critic'):
            fc1, w1, b1 = op.linear(op.flatten(state), 400, name='fc1', stddev=0.001, bias_start=0.001)
            fc2, w2, b2 = op.linear(fc1, 300, name='fc2', activation_fn='none', stddev=0.001, bias_start=0.001)
            fc2a, w3, b3 = op.linear(action, 300, name='fc3', activation_fn='none', stddev=0.001, bias_start=0.001)
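            # fc2 and fc2a each carry their own bias; subtracting b2 below
            # leaves a single bias term in the merged pre-activation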
            q, w4, b4 = op.linear(tf.nn.relu(fc2 + fc2a - b2), 1, name='value', activation_fn='none', stddev=0.001, bias_start=0.001)

            return q
Code example #9
    def dense_net(self, input_x):
        x = conv2d(input_x, 2 * self.nf, ks=[7, 7], s=2,
                   name='conv0')  # 16*16*(2*nf)
        print(x.get_shape().as_list())
        x = max_pooling(x, ks=[3, 3], s=2, padding='SAME')  # 8*8*(2*nf)
        print(x.get_shape().as_list())

        nb_blocks = 3
        for i in range(nb_blocks - 1):
            # 6 -> 12 -> 48
            x = self.dense_block(input_x=x,
                                 nb_layers=4,
                                 layer_name='dense_' + str(i))
            x = self.transition_layer(x, scope='trans_' + str(i))
        # 4*4*nf
        print(x.get_shape().as_list())
        """
        x = self.dense_block(input_x=x, nb_layers=6, layer_name='dense_1')
        x = self.transition_layer(x, scope='trans_1')
        x = self.dense_block(input_x=x, nb_layers=12, layer_name='dense_2')
        x = self.transition_layer(x, scope='trans_2')
        x = self.dense_block(input_x=x, nb_layers=48, layer_name='dense_3')
        x = self.transition_layer(x, scope='trans_3')
        """

        x = self.dense_block(
            input_x=x, nb_layers=6,
            layer_name='dense_final')  # 4*4*((nb_layers+1)*nf)
        print(x.get_shape().as_list())
        x = flatten(x)
        x = batch_norm(x, self.phase, 'linear_batch')
        x = relu(x)
        #         x = global average pooling (x)
        x = flatten(x)
        x = linear(x, self.label_n, name='linear2')
        return tf.nn.softmax(x)

        # 100 Layer
        #         x = Batch_Normalization(x, training=self.training, scope='linear_batch')
        #         x = Relu(x)
        #         x = Global_Average_Pooling(x)
        #         x = flatten(x)
        #         x = Linear(x)

        # x = tf.reshape(x, [-1, 10])
        # return x  (unreachable: the method already returns the softmax above)
Code example #10
File: network.py Project: BYU-PCCL/Jedi
    def build(self, states):
        with op.context(default_activation_fn='relu'):
            conv1, w1, b1 = op.conv2d(states, size=8, filters=32, stride=4, name='conv1')
            conv2, w2, b2 = op.conv2d(conv1, size=4, filters=64, stride=2, name='conv2')
            conv3, w3, b3 = op.conv2d(conv2, size=3, filters=64, stride=1, name='conv3')
            fc4, w4, b4 = op.linear(op.flatten(conv3, name="fc4"), 512, name='fc4')
            output, w5, b5 = op.linear(fc4, self.environment.get_num_actions(), activation_fn='none', name='output')

            return output
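For context, a minimal sketch of how Q-value outputs like this are typically consumed for greedy action selection; network and states are illustrative names, not from the repository:

    q_values = network.build(states)             # shape [batch, num_actions]
    greedy_action = tf.argmax(q_values, axis=1)  # index of the best action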
Code example #11
    def __init__(self, ob_space, subgoal_space, intrinsic_type):
        self.x = x = \
            tf.placeholder(tf.float32, [None] + list(ob_space), name='x_meta')
        self.subgoal_prev = subgoal_prev = \
            tf.placeholder(tf.float32, [None, subgoal_space], name='subgoal_prev')
        self.reward_prev = reward_prev = \
            tf.placeholder(tf.float32, [None, 1], name='reward_prev_meta')
        self.intrinsic_type = intrinsic_type

        with tf.variable_scope('encoder', reuse=True):
            x = tf.image.resize_images(x, [84, 84])
            x = x / 255.0
            x = tf.nn.relu(conv2d(x, 16, "l1", [8, 8], [4, 4]))
            x = tf.nn.relu(conv2d(x, 32, "l2", [4, 4], [2, 2]))
            x = flatten(x)

        with tf.variable_scope('meta_policy'):
            x = tf.nn.relu(
                linear(x, 256, "fc", normalized_columns_initializer(0.01)))
            x = tf.concat([x, subgoal_prev], axis=1)
            x = tf.concat([x, reward_prev], axis=1)

            # introduce a "fake" batch dimension of 1 after flatten
            # so that we can do LSTM over time dim
            x = tf.expand_dims(x, [0])

            size = 256
            lstm = rnn.BasicLSTMCell(size, state_is_tuple=True)
            self.state_size = lstm.state_size
            step_size = tf.shape(self.x)[:1]

            c_init = np.zeros((1, lstm.state_size.c), np.float32)
            h_init = np.zeros((1, lstm.state_size.h), np.float32)
            self.state_init = [c_init, h_init]
            c_in = tf.placeholder(tf.float32, [1, lstm.state_size.c])
            h_in = tf.placeholder(tf.float32, [1, lstm.state_size.h])
            self.state_in = [c_in, h_in]

            state_in = rnn.LSTMStateTuple(c_in, h_in)
            lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
                lstm,
                x,
                initial_state=state_in,
                sequence_length=step_size,
                time_major=False)
            lstm_c, lstm_h = lstm_state
            lstm_outputs = tf.reshape(lstm_outputs, [-1, size])
            self.logits = linear(lstm_outputs, subgoal_space, "action",
                                 normalized_columns_initializer(0.01))
            self.vf = tf.reshape(
                linear(lstm_outputs, 1, "value",
                       normalized_columns_initializer(1.0)), [-1])
            self.state_out = [lstm_c[:1, :], lstm_h[:1, :]]
            self.sample = categorical_sample(self.logits, subgoal_space)[0, :]
        self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                          tf.get_variable_scope().name)
Code example #12
File: ops_test.py Project: chrifer7/inf659-inception
 def testFlatten3D(self):
     height, width = 3, 3
     with self.test_session():
         images = tf.random_uniform((5, height, width),
                                    seed=1,
                                    name='images')
         output = ops.flatten(images)
         self.assertEqual(output.get_shape().num_elements(),
                          images.get_shape().num_elements())
         self.assertEqual(output.get_shape()[0], images.get_shape()[0])
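These tests exercise ops.flatten, the op nearly every snippet on this page relies on. As a point of reference, a minimal sketch of the behavior the tests imply (keep the batch dimension, collapse the rest); this is an assumption about the op, not its actual implementation:

    import numpy as np
    import tensorflow as tf

    def flatten_sketch(inputs, scope=None):
        # e.g. [5, 3, 3] -> [5, 9]; the batch dimension may be unknown (None)
        with tf.name_scope(scope, 'flatten', [inputs]):
            dims = inputs.get_shape().as_list()[1:]
            return tf.reshape(inputs, [-1, int(np.prod(dims))])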
Code example #13
File: network.py Project: BYU-PCCL/Jedi
    def build(self, states):
        with op.context(default_activation_fn='relu'):
            fc1,    w1, b1 = op.linear(op.flatten(states, name="fc1_flatten"), 500, name='fc1')
            fc2,    w2, b2 = op.linear(fc1, 500, name='fc2')
            value,  w3, b3 = op.linear(fc2, self.environment.get_num_actions(), activation_fn='none', name='value')
            advantages, w4, b4 = op.linear(fc2, self.environment.get_num_actions(), activation_fn='none', name='advantages')

            # Dueling DQN - http://arxiv.org/pdf/1511.06581v3.pdf
            output = value + (advantages - op.mean(advantages, keep_dims=True))

        return output
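A small numeric illustration (invented values) of why the advantage mean is subtracted: V and A are only identifiable up to a constant, and forcing the advantages to be zero-mean pins the decomposition down:

    import numpy as np

    value = np.array([[1.0]])                  # V(s)
    advantages = np.array([[2.0, 0.5, -0.5]])  # A(s, a)
    q = value + (advantages - advantages.mean(axis=1, keepdims=True))
    # q == [[2.33, 0.83, -0.17]] (rounded); adding any constant to all three
    # advantages leaves q unchanged, so that constant cannot leak into Q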
Code example #14
File: model.py Project: tamwaiban/Hifill-tensorflow
 def build_discriminator(self, x, reuse=False, training=True, nc=64):
     with tf.variable_scope('discriminator', reuse=reuse):
         x = dis_conv(x, nc, name='conv1', training=training)
         x = dis_conv(x, nc * 2, name='conv2', training=training)
         x = dis_conv(x, nc * 4, name='conv3', training=training)
         x = dis_conv(x, nc * 4, name='conv4', training=training)
         x = dis_conv(x, nc * 4, name='conv5', training=training)
         x = dis_conv(x, nc * 4, name='conv6', training=training)
         x = flatten(x, name='reshape')
         D = tf.layers.dense(x, 1, name='linear')
         return D
Code example #15
    def build_model(self, inputs, labels, is_training):
        # pad inputs to size 224x224x3 - NOTE: may change to bilinear upsampling
        pad = int((self.image_size - self.height) / 2)
        inputs = tf.pad(inputs, [[0, 0], [pad, pad], [pad, pad], [0, 0]])

        # convolution with 11x11 kernel and stride 4 (new size: 55x55x96)
        self.network = ops.convolution(inputs, self.channels, 96, 11, 96, stride=4,
                                       padding='VALID', is_training=is_training, scope='conv1')

        # pooling with 3x3 kernel and stride 2 (new size: 27x27x96)
        self.network = ops.pooling(self.network, k_size=3, scope='pool1')

        # convolution with 5x5 kernel and stride 1 (new size: 27x27x256)
        self.network = ops.convolution(self.network, 96, 256, 5, 256,
                                       is_training=is_training, scope='conv2')

        # pooling with 3x3 kernel and stride 2 (new size: 13x13x256)
        self.network = ops.pooling(self.network, k_size=3, scope='pool2')

        # convolution with 3x3 kernel and stride 1 (new size: 13x13x384)
        self.network = ops.convolution(self.network, 256, 384, 3, 384, batch_norm=False,
                                       is_training=is_training, scope='conv3')

        # convolution with 3x3 kernel and stride 1 (new size: 13x13x384)
        self.network = ops.convolution(self.network, 384, 384, 3, 384, batch_norm=False,
                                       is_training=is_training, scope='conv4')

        # convolution with 3x3 kernel and stride 1 (new size: 13x13x256)
        self.network = ops.convolution(self.network, 384, 256, 3, 256, batch_norm=False,
                                       is_training=is_training, scope='conv5')

        # pooling with 3x3 kernel and stride 2 (new size: 6x6x256)
        self.network = ops.pooling(self.network, k_size=3, scope='pool3')

        # flatten (new size: 9216)
        self.network = ops.flatten(self.network, scope='flatten')

        # fully connected layer (new size: 4096)
        self.network = ops.dense(self.network, 9216, 4096, dropout=True, dropout_rate=0.2,
                                 is_training=is_training, scope='fc1')

        # fully connected layer (new size: 1024) -- Original Paper Size: 4096 (for ImageNet)
        self.network = ops.dense(self.network, 4096, 1024, dropout=True, dropout_rate=0.2,
                                 is_training=is_training, scope='fc2')

        # output layer (new size: 10) -- Original Paper Size: 1000 (for ImageNet)
        self.network = ops.dense(self.network, 1024, 10, activation=None,
                                 is_training=is_training, scope='fc3')

        self.loss = ops.loss(self.network, labels, scope='loss')

        if is_training:
            self.optimizer = ops.optimize(self.loss, self.learning_rate, scope='update')
Code example #16
File: network.py Project: BYU-PCCL/Jedi
    def build(self, states):
        with op.context(default_activation_fn='relu'):
            # Common Perception
            l1,     w1, b1 = op.conv2d(states, size=8, filters=32, stride=4, name='conv1')

            # A Side
            l2a,    w2, b2 = op.conv2d(l1, size=4, filters=64, stride=2, name='a_conv2')
            l2a_fc, w3, b3 = op.linear(op.flatten(l2a, name="a_fc4"), 32, activation_fn='none', name='a_fc3')

            # B Side
            l2b,    w4, b4 = op.conv2d(l1, size=4, filters=64, stride=2, name='b_conv2')
            l2b_fc, w5, b5 = op.linear(op.flatten(l2b, name="b_fc4"), 32, activation_fn='none', name='b_fc3')

            # Causal Matrix
            l2a_fc_e = op.expand(l2a_fc, 2, name='a')  # now ?x32x1
            l2b_fc_e = op.expand(l2b_fc, 1, name='b')  # now ?x1x32
            # batched outer product: (?x32x1) @ (?x1x32) -> ?x32x32
            causes = op.flatten(tf.matmul(l2a_fc_e, l2b_fc_e, name='causes'))

            l4,      w6, b6 = op.linear(causes, 512, name='l4')
            output,  w7, b7 = op.linear(l4, self.environment.get_num_actions(), activation_fn='none', name='output')

            return output
Code example #17
File: ops_test.py Project: chrifer7/inf659-inception
 def testFlattenBatchSize(self):
     height, width = 3, 3
     with self.test_session() as sess:
         images = tf.random_uniform((5, height, width, 3),
                                    seed=1,
                                    name='images')
         inputs = tf.placeholder(tf.int32, (None, height, width, 3))
         output = ops.flatten(inputs)
         self.assertEqual(output.get_shape().as_list(),
                          [None, height * width * 3])
         output = sess.run(output, {inputs: images.eval()})
         self.assertEqual(output.size, images.get_shape().num_elements())
         self.assertEqual(output.shape[0], images.get_shape()[0])
Code example #18
File: network.py Project: BYU-PCCL/Jedi
    def build(self, states):
        with op.context(default_activation_fn='relu'):
            conv1,    w1, b1 = op.conv2d(states, size=8, filters=32, stride=4, name='conv1')
            conv2,    w2, b2 = op.conv2d(conv1, size=4, filters=64, stride=2, name='conv2')
            conv3,    w3, b3 = op.conv2d(conv2, size=3, filters=64, stride=1, name='conv3')
            fc4,      w4, b4 = op.linear(op.flatten(conv3, name="fc4"), 512, name='fc4')
            output,   w5, b5 = op.linear(fc4, self.environment.get_num_actions(), activation_fn='none', name='output')
            raw_sigma, w6, b6 = op.linear(fc4, self.environment.get_num_actions(), name='variance')

            raw_sigma += 0.0001  # to avoid divide by zero
            sigma = tf.exp(raw_sigma)

        return output, sigma
Code example #19
    def _resnet(self, input):
        with tf.variable_scope(self.name, reuse=self._reuse):
            num_filters = [128, 256, 512, 512]
            if self._image_size == 256:
                num_filters.append(512)

            E = input
            E = ops.conv_block(E,
                               64,
                               'C{}_{}'.format(64, 0),
                               4,
                               2,
                               self._is_train,
                               self._reuse,
                               norm=None,
                               activation='leaky',
                               bias=True)
            for i, n in enumerate(num_filters):
                E = ops.residual(E,
                                 n,
                                 'res{}_{}'.format(n, i + 1),
                                 self._is_train,
                                 self._reuse,
                                 norm=self._norm,
                                 bias=True)
                E = tf.nn.avg_pool(E, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')
            E = tf.nn.relu(E)
            E = tf.nn.avg_pool(E, [1, 8, 8, 1], [1, 8, 8, 1], 'SAME')
            E = ops.flatten(E)
            mu = ops.mlp(E,
                         self._latent_dim,
                         'FC8_mu',
                         self._is_train,
                         self._reuse,
                         norm=None,
                         activation=None)
            log_sigma = ops.mlp(E,
                                self._latent_dim,
                                'FC8_sigma',
                                self._is_train,
                                self._reuse,
                                norm=None,
                                activation=None)

            # Reparameterization trick: the noise must match the shape of mu.
            # (The original passed tf.shape(self._latent_dim), i.e. the shape
            # of a Python int, which yields a single shared scalar sample.)
            z = mu + tf.random_normal(shape=tf.shape(mu)) * tf.exp(log_sigma)

            self._reuse = True
            self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                              self.name)
            return z, mu, log_sigma
Code example #20
File: network.py Project: BYU-PCCL/Jedi
    def build(self, states):

        with tf.variable_scope('net'), op.context(default_activation_fn='relu'):
            conv1,     w1, b1 = op.conv2d(states, size=8, filters=32, stride=4, name='conv1')
            conv2,     w2, b2 = op.conv2d(conv1, size=4, filters=64, stride=2, name='conv2')
            conv3,     w3, b3 = op.conv2d(conv2, size=3, filters=64, stride=1, name='conv3')
            fc4,       w4, b4 = op.linear(op.flatten(conv3), 256, name='fc4')

            h,         w5, b5 = op.linear(fc4, 256, name='h')
            h1,        w6, b6 = op.linear(h, 256, name='h1')
            hhat,      w7, b7 = op.linear(h1, 256, name='hhat')

            fc8,       w8, b8 = op.linear(op.merge(h, hhat, name="fc8"), 256, name='fc8')
            output,    w9, b9 = op.linear(fc8, self.environment.get_num_actions(), activation_fn='none', name='output')

        with tf.name_scope('prediction'), tf.variable_scope('net', reuse=True), op.context(default_activation_fn='relu'):
            hhat_conv1, _, _ = op.conv2d(self.inputs.lookaheads, size=8, filters=32, stride=4, name='conv1')
            hhat_conv2, _, _ = op.conv2d(hhat_conv1, size=4, filters=64, stride=2, name='conv2')
            hhat_conv3, _, _ = op.conv2d(hhat_conv2, size=3, filters=64, stride=1, name='conv3')
            hhat_truth, _, _ = op.linear(op.flatten(hhat_conv3), 256, name='fc4')

            self.constraint_error = tf.reduce_mean((hhat - hhat_truth)**2, reduction_indices=1, name='prediction_error')

        return output
Code example #21
File: network.py Project: BYU-PCCL/Jedi
    def build(self, states):
        with op.context(default_activation_fn='relu'):
            conv1, w1, b1 = op.conv2d(states, size=8, filters=32, stride=4, name='conv1')
            conv2, w2, b2 = op.conv2d(conv1, size=4, filters=64, stride=2, name='conv2')
            conv3, w3, b3 = op.conv2d(conv2, size=3, filters=64, stride=1, name='conv3')
            conv3_flatten = op.flatten(conv3, name="conv3_flatten")

            fc4_value, w4, b4 = op.linear(conv3_flatten, 512, name='fc4_value')
            value, w5, b5 = op.linear(fc4_value, 1, activation_fn='none', name='value')

            fc4_advantage, w6, b6 = op.linear(conv3_flatten, 512, name='fc4_advantages')
            advantages, w7, b7 = op.linear(fc4_advantage, self.environment.get_num_actions(), activation_fn='none', name='advantages')

            # Dueling DQN - http://arxiv.org/pdf/1511.06581v3.pdf
            output = value + (advantages - op.mean(advantages, keep_dims=True))

            return output
Code example #22
File: nets.py Project: maestrojeong/wgan_gp
 def __call__(self, x, reuse=False):
     '''
     Args :
         x - 4D tensor [batch_size, 28, 28, 1]
         reuse - bool
             whether reuse or not
     Return :
         d - 2D tensor [batch, 1]
     '''
     with tf.variable_scope(self.name) as scope:
         if reuse:
             scope.reuse_variables()
         d = convolution(x, [4, 4, 1, 32], strides=[1, 2, 2, 1], activation=leaky_relu, scope='conv1')
         d = flatten(d)
         d = fc_layer(d, 128, activation=leaky_relu, scope="fc1")
         d = fc_layer(d, 1, scope="fc2")
     return d
Code example #23
def mobilenetv2(inputs, num_classes, is_train=True, reuse=False):
    exp = 6  # expansion ratio
    with tf.variable_scope('mobilenetv2'):
        net = ops.conv2d_block(inputs, 32, 3, 2, is_train,
                               name='conv1_1')  # size/2

        net = ops.res_block(net, 1, 16, 1, is_train, name='res2_1')

        net = ops.res_block(net, exp, 24, 2, is_train, name='res3_1')  # size/4
        net = ops.res_block(net, exp, 24, 1, is_train, name='res3_2')

        net = ops.res_block(net, exp, 32, 2, is_train, name='res4_1')  # size/8
        net = ops.res_block(net, exp, 32, 1, is_train, name='res4_2')
        net = ops.res_block(net, exp, 32, 1, is_train, name='res4_3')

        net = ops.res_block(net, exp, 64, 1, is_train, name='res5_1')
        net = ops.res_block(net, exp, 64, 1, is_train, name='res5_2')
        net = ops.res_block(net, exp, 64, 1, is_train, name='res5_3')
        net = ops.res_block(net, exp, 64, 1, is_train, name='res5_4')

        net = ops.res_block(net, exp, 96, 2, is_train,
                            name='res6_1')  # size/16
        net = ops.res_block(net, exp, 96, 1, is_train, name='res6_2')
        net = ops.res_block(net, exp, 96, 1, is_train, name='res6_3')

        net = ops.res_block(net, exp, 160, 2, is_train,
                            name='res7_1')  # size/32
        net = ops.res_block(net, exp, 160, 1, is_train, name='res7_2')
        net = ops.res_block(net, exp, 160, 1, is_train, name='res7_3')

        net = ops.res_block(net,
                            exp,
                            320,
                            1,
                            is_train,
                            name='res8_1',
                            shortcut=False)

        net = ops.pwise_block(net, 1280, is_train, name='conv9_1')
        net = ops.global_avg(net)
        logits = ops.flatten(ops.conv_1x1(net, num_classes, name='logits'))

        pred = tf.nn.softmax(logits, name='prob')
        return logits, pred
Code example #24
    def build_model(self, inputs, labels, is_training=False):
        self.network = ops.convolution(inputs,
                                       self.channels,
                                       50,
                                       5,
                                       50,
                                       is_training=is_training,
                                       scope='conv1')

        self.network = ops.pooling(self.network, scope='pool1')

        self.network = ops.convolution(self.network,
                                       50,
                                       20,
                                       5,
                                       20,
                                       is_training=is_training,
                                       scope='conv2')

        self.network = ops.pooling(self.network, scope='pool2')

        self.network = ops.flatten(self.network, scope='flatten')

        self.network = ops.dense(self.network,
                                 self.network.get_shape().as_list()[1],
                                 200,
                                 scope='fc1')

        self.network = ops.dense(self.network, 200, 50, scope='fc2')

        self.network = ops.dense(self.network,
                                 50,
                                 10,
                                 activation=None,
                                 scope='fc3')

        self.loss = ops.loss(self.network, labels, scope='loss')
        self.accuracy = ops.accuracy(self.network, labels, scope='accuracy')

        if is_training:
            self.optimizer = ops.optimize(self.loss,
                                          self.learning_rate,
                                          scope='update')
Code example #25
def model_fun(x, is_training):
    x_shape = x.get_shape().as_list()[1:]
    kernel = {'c1': [5, 5, x_shape[2], 20], 'c2': [5, 5, 20, 50]}
    strides = {'1': [1, 1, 1, 1], '2': [1, 2, 2, 1]}
    pool_win_size = {'2': [1, 2, 2, 1]}

    conv = ops.conv2d(x, 'conv1', kernel['c1'], strides['1'], 'SAME')

    conv = ops.residual_bottleneck_block(conv, 'ins_block', is_training, 64)

    with tf.variable_scope('Flatten_layer') as scope:
        conv = ops.flatten(conv)
    with tf.variable_scope('Output_layer') as scope:
        conv = ops.get_hidden_layer(conv,
                                    'output_layer',
                                    10,
                                    activation="none",
                                    initializer='xavier')
    return conv
Code example #26
    def _convnet(self, input):
        with tf.variable_scope(self.name, reuse=self._reuse):
            num_filters = [64, 128, 256, 512, 512, 512, 512]
            if self._image_size == 256:
                num_filters.append(512)

            E = input
            for i, n in enumerate(num_filters):
                E = ops.conv_block(E,
                                   n,
                                   'C{}_{}'.format(n, i),
                                   4,
                                   2,
                                   self._is_train,
                                   self._reuse,
                                   norm=self._norm if i else None,
                                   activation='leaky')
            E = ops.flatten(E)
            mu = ops.mlp(E,
                         self._latent_dim,
                         'FC8_mu',
                         self._is_train,
                         self._reuse,
                         norm=None,
                         activation=None)
            log_sigma = ops.mlp(E,
                                self._latent_dim,
                                'FC8_sigma',
                                self._is_train,
                                self._reuse,
                                norm=None,
                                activation=None)

            # Reparameterization trick: sample noise with the shape of mu (the
            # original used tf.shape(self._latent_dim), the shape of an int)
            z = mu + tf.random_normal(shape=tf.shape(mu)) * tf.exp(log_sigma)

            self._reuse = True
            self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                              self.name)
            return z, mu, log_sigma
Code example #27
    def lenet(self, x, is_training):
        x_shape = x.get_shape().as_list()[1:]
        kernel = {'c1': [5, 5, x_shape[2], 20], 'c2': [5, 5, 20, 50]}
        strides = {'1': [1, 1, 1, 1], '2': [1, 2, 2, 1]}
        pool_win_size = {'2': [1, 2, 2, 1]}

        with tf.variable_scope('Conv_1') as scope:
            conv = ops.conv2d(x,'conv1', kernel['c1'], strides['1'], 'SAME')
            conv = tf.nn.lrn(conv, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
            conv = ops.max_pool(conv, pool_win_size['2'], strides['2'])
        with tf.variable_scope('Conv_2') as scope:
            conv = ops.conv2d(conv,'conv2', kernel['c2'], strides['1'], 'SAME')
            conv = tf.nn.lrn(conv, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
            conv = ops.max_pool(conv, pool_win_size['2'], strides['2'])
        with tf.variable_scope('Flatten_layer') as scope:
            conv=ops.flatten(conv)
        with tf.variable_scope('Hidden_layer_1') as scope:
            conv = ops.get_hidden_layer(conv,'Hidden_layer_1',120, initializer='xavier')
        with tf.variable_scope('Hidden_layer_2') as scope:
            conv = ops.get_hidden_layer(conv,'Hidden_layer_2', 84, initializer='xavier')
        with tf.variable_scope('Output_layer') as scope:
            conv = ops.get_hidden_layer(conv,'output_layer', self.no_of_classes, activation="none", initializer='xavier')
        return conv
Code example #28
def inception_v3(inputs,
                 dropout_keep_prob=0.8,
                 num_classes=1000,
                 is_training=True,
                 restore_logits=True,
                 scope=''):
  """Latest Inception from http://arxiv.org/abs/1512.00567.

    "Rethinking the Inception Architecture for Computer Vision"

    Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
    Zbigniew Wojna

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    dropout_keep_prob: dropout keep_prob.
    num_classes: number of predicted classes.
    is_training: whether is training or not.
    restore_logits: whether or not the logits layers should be restored.
      Useful for fine-tuning a model with different num_classes.
    scope: Optional scope for name_scope.

  Returns:
    a list containing 'logits', 'aux_logits' Tensors.
  """
  # end_points will collect relevant activations for external use, for example
  # summaries or losses.
  end_points = {}
  with tf.name_scope(scope, 'inception_v3', [inputs]):
    with scopes.arg_scope([ops.conv2d, ops.fc, ops.batch_norm, ops.dropout],
                          is_training=is_training):
      with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                            stride=1, padding='VALID'):
        # 299 x 299 x 3
        end_points['conv0'] = ops.conv2d(inputs, 32, [3, 3], stride=2,
                                         scope='conv0')
        # 149 x 149 x 32
        end_points['conv1'] = ops.conv2d(end_points['conv0'], 32, [3, 3],
                                         scope='conv1')
        # 147 x 147 x 32
        end_points['conv2'] = ops.conv2d(end_points['conv1'], 64, [3, 3],
                                         padding='SAME', scope='conv2')
        # 147 x 147 x 64
        end_points['pool1'] = ops.max_pool(end_points['conv2'], [3, 3],
                                           stride=2, scope='pool1')
        # 73 x 73 x 64
        end_points['conv3'] = ops.conv2d(end_points['pool1'], 80, [1, 1],
                                         scope='conv3')
        # 73 x 73 x 80.
        end_points['conv4'] = ops.conv2d(end_points['conv3'], 192, [3, 3],
                                         scope='conv4')
        # 71 x 71 x 192.
        end_points['pool2'] = ops.max_pool(end_points['conv4'], [3, 3],
                                           stride=2, scope='pool2')
        # 35 x 35 x 192.
        net = end_points['pool2']
      # Inception blocks
      with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                            stride=1, padding='SAME'):
        # mixed: 35 x 35 x 256.
        with tf.variable_scope('mixed_35x35x256a'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 64, [1, 1])
          with tf.variable_scope('branch5x5'):
            branch5x5 = ops.conv2d(net, 48, [1, 1])
            branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 32, [1, 1])
          net = tf.concat(axis=3, values=[branch1x1, branch5x5, branch3x3dbl, branch_pool])
          end_points['mixed_35x35x256a'] = net
        # mixed_1: 35 x 35 x 288.
        with tf.variable_scope('mixed_35x35x288a'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 64, [1, 1])
          with tf.variable_scope('branch5x5'):
            branch5x5 = ops.conv2d(net, 48, [1, 1])
            branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
          net = tf.concat(axis=3, values=[branch1x1, branch5x5, branch3x3dbl, branch_pool])
          end_points['mixed_35x35x288a'] = net
        # mixed_2: 35 x 35 x 288.
        with tf.variable_scope('mixed_35x35x288b'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 64, [1, 1])
          with tf.variable_scope('branch5x5'):
            branch5x5 = ops.conv2d(net, 48, [1, 1])
            branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
          net = tf.concat(axis=3, values=[branch1x1, branch5x5, branch3x3dbl, branch_pool])
          end_points['mixed_35x35x288b'] = net
        # mixed_3: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768a'):
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 384, [3, 3], stride=2, padding='VALID')
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3],
                                      stride=2, padding='VALID')
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
          net = tf.concat(axis=3, values=[branch3x3, branch3x3dbl, branch_pool])
          end_points['mixed_17x17x768a'] = net
        # mixed4: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768b'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 128, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 128, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 128, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool])
          end_points['mixed_17x17x768b'] = net
        # mixed_5: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768c'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 160, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 160, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool])
          end_points['mixed_17x17x768c'] = net
        # mixed_6: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768d'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 160, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 160, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool])
          end_points['mixed_17x17x768d'] = net
        # mixed_7: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768e'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 192, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 192, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 192, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool])
          end_points['mixed_17x17x768e'] = net
        # Auxiliary Head logits
        aux_logits = tf.identity(end_points['mixed_17x17x768e'])
        with tf.variable_scope('aux_logits'):
          aux_logits = ops.avg_pool(aux_logits, [5, 5], stride=3,
                                    padding='VALID')
          aux_logits = ops.conv2d(aux_logits, 128, [1, 1], scope='proj')
          # Shape of feature map before the final layer.
          shape = aux_logits.get_shape()
          aux_logits = ops.conv2d(aux_logits, 768, shape[1:3], stddev=0.01,
                                  padding='VALID')
          aux_logits = ops.flatten(aux_logits)
          aux_logits = ops.fc(aux_logits, num_classes, activation=None,
                              stddev=0.001, restore=restore_logits)
          end_points['aux_logits'] = aux_logits
        # mixed_8: 8 x 8 x 1280.
        # Note that the scope below is left unchanged so as not to invalidate
        # previously saved checkpoints.
        # (TODO) Fix the scope when appropriate.
        with tf.variable_scope('mixed_17x17x1280a'):
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 192, [1, 1])
            branch3x3 = ops.conv2d(branch3x3, 320, [3, 3], stride=2,
                                   padding='VALID')
          with tf.variable_scope('branch7x7x3'):
            branch7x7x3 = ops.conv2d(net, 192, [1, 1])
            branch7x7x3 = ops.conv2d(branch7x7x3, 192, [1, 7])
            branch7x7x3 = ops.conv2d(branch7x7x3, 192, [7, 1])
            branch7x7x3 = ops.conv2d(branch7x7x3, 192, [3, 3],
                                     stride=2, padding='VALID')
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
          net = tf.concat(axis=3, values=[branch3x3, branch7x7x3, branch_pool])
          end_points['mixed_17x17x1280a'] = net
        # mixed_9: 8 x 8 x 2048.
        with tf.variable_scope('mixed_8x8x2048a'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 320, [1, 1])
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 384, [1, 1])
            branch3x3 = tf.concat(axis=3, values=[ops.conv2d(branch3x3, 384, [1, 3]),
                                                  ops.conv2d(branch3x3, 384, [3, 1])])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 448, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
            branch3x3dbl = tf.concat(axis=3, values=[ops.conv2d(branch3x3dbl, 384, [1, 3]),
                                                     ops.conv2d(branch3x3dbl, 384, [3, 1])])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat(axis=3, values=[branch1x1, branch3x3, branch3x3dbl, branch_pool])
          end_points['mixed_8x8x2048a'] = net
        # mixed_10: 8 x 8 x 2048.
        with tf.variable_scope('mixed_8x8x2048b'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 320, [1, 1])
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 384, [1, 1])
            branch3x3 = tf.concat(axis=3, values=[ops.conv2d(branch3x3, 384, [1, 3]),
                                                  ops.conv2d(branch3x3, 384, [3, 1])])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 448, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
            branch3x3dbl = tf.concat(axis=3, values=[ops.conv2d(branch3x3dbl, 384, [1, 3]),
                                                     ops.conv2d(branch3x3dbl, 384, [3, 1])])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat(axis=3, values=[branch1x1, branch3x3, branch3x3dbl, branch_pool])
          end_points['mixed_8x8x2048b'] = net
        # Final pooling and prediction
        with tf.variable_scope('logits'):
          shape = net.get_shape()
          net = ops.avg_pool(net, shape[1:3], padding='VALID', scope='pool')
          # 1 x 1 x 2048
          net = ops.dropout(net, dropout_keep_prob, scope='dropout')
          net = ops.flatten(net, scope='flatten')
          # 2048
          logits = ops.fc(net, num_classes, activation=None, scope='logits',
                          restore=restore_logits)
          # 1000
          end_points['logits'] = logits
          end_points['predictions'] = tf.nn.softmax(logits, name='predictions')
      return logits, end_points
Code example #29
    def __init__(self, session, args):
        self.n_input = args.input_size     # Number of features in each observation
        self.num_obs = 2                   # Number of observations in each state
        self.n_actions = args.num_actions  # Number of output q_values
        self.discount = args.discount      # Discount factor
        self.epsilon = 0.25                # Epsilon
        self.learning_rate = args.learning_rate
        self.regularization = args.reg
        self.use_target = args.use_target
        self.double_q = args.double_q

        self.EWC = args.EWC
        self.EWC_decay = args.EWC_decay

        self.beta = args.beta

        self.layer_sizes = [self.n_input] + args.layer_sizes + [self.n_actions]

        self.session = session

        self.memory = ReplayMemory(args)

        # Tensorflow variables:

        # Model for Q-values

        self.state = tf.placeholder("float", [None, self.n_input])
        with tf.variable_scope('prediction'):
            self.pred_q, self.reg, self.pred_weights = self.network(self.state, self.layer_sizes)
        with tf.variable_scope('target'):
            self.target_pred_q, _, self.target_weights = self.network(self.state, self.layer_sizes)

        self.flattened_weights = flatten(self.pred_weights)
        
        #self.state = tf.placeholder("float", [None, self.num_obs, 84, 84])
        #with tf.variable_scope('prediction'):
        #    self.pred_q, self.reg, self.pred_weights = self.cnn(self.state, [], self.n_actions)
        #with tf.variable_scope('target'):
        #    self.target_pred_q, _, self.target_weights = self.cnn(self.state, [], self.n_actions)

        # Graph for loss function
        self.action = tf.placeholder('int64', [None])
        action_one_hot = tf.one_hot(self.action, self.n_actions, 1.0, 0.0)
        q_acted = tf.reduce_sum(self.pred_q * action_one_hot, reduction_indices=1)

        self.target_q = tf.placeholder("float", [None])
        if self.beta == 0:
            self.td_err = self.target_q - q_acted
        else:
            self.td_err = tf.exp(self.beta * self.target_q) - tf.exp(self.beta * q_acted)
        td_loss = tf.reduce_mean(tf.square(self.td_err))  # + self.reg


        # Calculations for Elastic Weights
        log_td_loss = tf.log(td_loss)
        grads = flatten(tf.gradients(log_td_loss, self.pred_weights))
        fisher = tf.square(grads)
        fisher = 100 * fisher / tf.reduce_max(fisher) # Max normalise
        self.EWC_strength = fisher

        # Variables for holding discounted sums
        self.EWC_strength_ = np.zeros(self.EWC_strength.get_shape())
        self.EWC_strength_s = np.zeros(self.EWC_strength.get_shape())
        self.EWC_strength_1 = np.zeros(self.EWC_strength.get_shape())
        self.EWC_strength_1s = np.zeros(self.EWC_strength.get_shape())

        # Placeholders to feed sums into
        self.EWC_strength_ph = tf.placeholder("float", self.EWC_strength.get_shape())
        self.EWC_strength_1_ph = tf.placeholder("float", self.EWC_strength.get_shape())

        #EWC_term = tf.reduce_sum( self.EWC_strength_ph * tf.square(flatten(self.pred_weights) - flatten(self.target_weights)) )
        EWC_term = tf.reduce_sum( self.EWC_strength_ph * tf.square(flatten(self.pred_weights)) - 2 * self.EWC_strength_1_ph * flatten(self.pred_weights) )
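        # The line above expands the commented quadratic penalty:
        # sum F*(w - w0)^2 = sum F*w^2 - 2*sum F*w0*w + const; the F*w0 sums
        # presumably arrive via EWC_strength_1_ph, and the constant is dropped
        # because it has zero gradient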


        total_loss = td_loss + EWC_term 
        
        self.optim = tf.train.AdamOptimizer(self.learning_rate).minimize(total_loss)

        # Global step (NB: Updated infrequently)
        self.step = tf.Variable(0, name='global_step', trainable=False)
Code example #30
def mobilenetv2_addBias(inputs,
                        num_classes,
                        channel_rito,
                        is_train=True,
                        reuse=False):
    exp = 6  # expansion ratio
    with tf.variable_scope('mobilenetv2'):
        net = ops.conv2d_block(inputs,
                               round(32 * channel_rito),
                               3,
                               2,
                               is_train,
                               name='conv1_1',
                               bias=True)  # size/2

        net = ops.res_block(net,
                            1,
                            round(16 * channel_rito),
                            1,
                            is_train,
                            name='res2_1',
                            bias=True)

        net = ops.res_block(net,
                            exp,
                            round(24 * channel_rito),
                            1,
                            is_train,
                            name='res3_1',
                            bias=True)  # size/2
        net = ops.res_block(net,
                            exp,
                            round(24 * channel_rito),
                            1,
                            is_train,
                            name='res3_2',
                            bias=True)

        net = ops.res_block(net,
                            exp,
                            round(32 * channel_rito),
                            2,
                            is_train,
                            name='res4_1',
                            bias=True)  # size/4
        net = ops.res_block(net,
                            exp,
                            round(32 * channel_rito),
                            1,
                            is_train,
                            name='res4_2',
                            bias=True)
        net = ops.res_block(net,
                            exp,
                            round(32 * channel_rito),
                            1,
                            is_train,
                            name='res4_3',
                            bias=True)

        net = ops.res_block(net,
                            exp,
                            round(64 * channel_rito),
                            1,
                            is_train,
                            name='res5_1',
                            bias=True)
        net = ops.res_block(net,
                            exp,
                            round(64 * channel_rito),
                            1,
                            is_train,
                            name='res5_2',
                            bias=True)
        net = ops.res_block(net,
                            exp,
                            round(64 * channel_rito),
                            1,
                            is_train,
                            name='res5_3',
                            bias=True)
        net = ops.res_block(net,
                            exp,
                            round(64 * channel_rito),
                            1,
                            is_train,
                            name='res5_4',
                            bias=True)

        net = ops.res_block(net,
                            exp,
                            round(96 * channel_rito),
                            2,
                            is_train,
                            name='res6_1',
                            bias=True)  # size/8
        net = ops.res_block(net,
                            exp,
                            round(96 * channel_rito),
                            1,
                            is_train,
                            name='res6_2',
                            bias=True)
        net = ops.res_block(net,
                            exp,
                            round(96 * channel_rito),
                            1,
                            is_train,
                            name='res6_3',
                            bias=True)

        net = ops.res_block(net,
                            exp,
                            round(160 * channel_rito),
                            1,
                            is_train,
                            name='res7_1',
                            bias=True)  # size/8
        net = ops.res_block(net,
                            exp,
                            round(160 * channel_rito),
                            1,
                            is_train,
                            name='res7_2',
                            bias=True)
        net = ops.res_block(net,
                            exp,
                            round(160 * channel_rito),
                            1,
                            is_train,
                            name='res7_3',
                            bias=True)

        net = ops.res_block(net,
                            exp,
                            round(320 * channel_rito),
                            1,
                            is_train,
                            name='res8_1',
                            bias=True,
                            shortcut=False)

        net = ops.pwise_block(net, 128, is_train, name='conv9_1', bias=True)
        net = ops.global_avg(net)
        logits = ops.flatten(
            ops.conv_1x1(net, num_classes, name='logits', bias=True))

        pred = tf.nn.softmax(logits, name='prob')
        return logits, pred