Example 1
    def forward(self, inputs, scope=None):
        with tf.variable_scope(scope, 'StaticBrnn', [inputs]) as scope:
            # inputs [B, max_time, depth]
            inputs_list = tf.unstack(inputs, axis=1)  # [B, depth]
            outputs_list, _, _ = tf.nn.static_bidirectional_rnn(
                self._fw_cell, self._bw_cell, inputs_list, dtype=tf.float32)

            # Regularization
            filter_weights = lambda vars: [
                x for x in vars if x.op.name.endswith('kernel')
            ]
            tf.contrib.layers.apply_regularization(
                self._rnn_regularizer,
                filter_weights(self._fw_cell.trainable_weights))
            tf.contrib.layers.apply_regularization(
                self._rnn_regularizer,
                filter_weights(self._bw_cell.trainable_weights))

            rnn_outputs = tf.stack(outputs_list,
                                   axis=1)  # [B, max_time, depth]

            # Fully connected layer
            if self._num_output_units > 0:
                with arg_scope(self._fc_hyperparams):
                    rnn_outputs = fully_connected(rnn_outputs,
                                                  self._num_output_units,
                                                  activation_fn=tf.nn.relu)

            if self._summarize_activations:
                for i in range(len(outputs_list)):
                    tf.summary.histogram(
                        'Activation/{}/Step_{}'.format(scope.name, i),
                        outputs_list[i])
            return rnn_outputs
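
A minimal self-contained sketch of the same static bidirectional pass with the cells and shapes spelled out (the cell size, batch size and sequence length here are assumptions, not values from the class above):

import tensorflow as tf

fw_cell = tf.nn.rnn_cell.LSTMCell(128)
bw_cell = tf.nn.rnn_cell.LSTMCell(128)
inputs = tf.placeholder(tf.float32, [32, 10, 64])  # [B, max_time, depth]
inputs_list = tf.unstack(inputs, axis=1)           # max_time tensors of shape [B, depth]
outputs_list, _, _ = tf.nn.static_bidirectional_rnn(
    fw_cell, bw_cell, inputs_list, dtype=tf.float32)
rnn_outputs = tf.stack(outputs_list, axis=1)       # [B, max_time, 2*128]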
Example 2
def neural_network(model='lstm', rnn_size=128, num_layers=2):
    if model == 'rnn':
        cell_fun = tf.nn.rnn_cell.BasicRNNCell
    elif model == 'gru':
        cell_fun = tf.nn.rnn_cell.GRUCell
    elif model == 'lstm':
        cell_fun = tf.nn.rnn_cell.BasicLSTMCell
        
    cell = cell_fun(rnn_size, state_is_tuple=True)
    cell = tf.nn.rnn_cell.MultiRNNCell([cell] * num_layers, state_is_tuple=True)
    
    initial_state = cell.zero_state(batch_size, tf.float32)
    
    with tf.variable_scope('rnnlm'):
        softmax_w = tf.get_variable('softmax_w', [rnn_size, len(words)+1])
        softmax_b = tf.get_variable('softmax_b', [len(words)+1])
        with tf.device("/cpu:0"):
            embedding = tf.get_variable("embedding", [len(words)+1, rnn_size])
            inputs = tf.nn.embedding_lookup(embedding, input_data)
            
        outputs, last_state = tf.nn.dynamic_rnn(cell, inputs, initial_state=initial_state, scope='rnnlm')
        output = tf.reshape(outputs, [-1, rnn_size])
        
        logits = tf.matmul(output, softmax_w) + softmax_b
        probs = tf.nn.softmax(logits)
        return logits, last_state, probs, cell, initial_state
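
Note that [cell] * num_layers stacks the same cell object at every layer; depending on the TF 1.x version this either shares one set of weights across layers or raises a variable-reuse error. A sketch of the usual fix, building a fresh cell per layer:

    cells = [cell_fun(rnn_size, state_is_tuple=True) for _ in range(num_layers)]
    cell = tf.nn.rnn_cell.MultiRNNCell(cells, state_is_tuple=True)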
Example 3
def discriminator(inputs, labels, reuse=None):
    with tf.variable_scope('discriminator') as scope:
        if reuse:
            scope.reuse_variables()

        # build the layers whether or not variables are being reused
        inputs = tf.concat([inputs, labels], 1)
        hidden = tf.layers.dense(inputs, n_hidden, activation=tf.nn.relu)
        output = tf.layers.dense(hidden, 1, activation=None)

        return output
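
Typical usage in a conditional GAN, which is what the reuse flag is for (the tensor names below are placeholders, not part of the snippet):

D_real = discriminator(real_inputs, labels)              # first call creates the variables
D_fake = discriminator(fake_inputs, labels, reuse=True)  # second call reuses them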
Example 4
def cross_layer2(x0, x, name):
    with tf.variable_scope(name):
        input_dim = x0.get_shape().as_list()[1]
        w = tf.get_variable(
            "weight", [input_dim],
            initializer=tf.truncated_normal_initializer(stddev=0.01))
        b = tf.get_variable(
            "bias", [input_dim],
            initializer=tf.truncated_normal_initializer(stddev=0.01))
        xb = tf.tensordot(tf.reshape(x, [-1, 1, input_dim]), w, 1)
        return x0 * xb + b + x
Example 5
def cross_layer(x0, x, name):
    with tf.variable_scope(name):
        input_dim = x0.get_shape().as_list()[1]
        w = tf.get_variable(
            "weight", [input_dim],
            initializer=tf.truncated_normal_initializer(stddev=0.01))
        b = tf.get_variable(
            "bias", [input_dim],
            initializer=tf.truncated_normal_initializer(stddev=0.01))
        xx0 = tf.expand_dims(x0, -1)
        mat = tf.matmul(xx0, tf.reshape(x, [-1, 1, input_dim]))
        return tf.tensordot(mat, w, 1) + b + x
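
Both cross-layer variants compute the same DCN cross term, x0 * (x . w) + b + x; Example 5 just materializes the outer product of x0 and x first. A sketch of how such layers are usually stacked, holding x0 fixed (the depth of 3 is an arbitrary choice):

x = x0
for i in range(3):
    x = cross_layer(x0, x, 'cross_{}'.format(i))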
Example 6
    def fc_layer(self, x, name):
        with tf.variable_scope(name):
            shape = x.get_shape().as_list()
            dim = 1
            for i in shape[1:]:
                dim *= i
            x = tf.reshape(x, [-1, dim])
            w = self.get_fc_weight(name)
            b = self.get_bias(name)

            result = tf.nn.bias_add(tf.matmul(x, w), b)
            return result
Example 7
    def _build_net(self):
        # ------ build evaluate_net -----
        self.s = tf.placeholder(tf.float32, [None, self.n_features], name='s')
        self.q_target = tf.placeholder(tf.float32, [None, self.n_actions], name='Q_target')  # used to compute the loss

        with tf.variable_scope('eval_net'):
            # c_names (collections_names) are used later when updating the target_net parameters
            # n_l1 is the number of units in the first layer; w_initializer and b_initializer initialize the weights and biases
            # TODO: why do w and b get a single initializer value instead of a matrix and a vector?
            # (an initializer only fixes the distribution; the shape comes from tf.get_variable)
            c_names, n_l1, w_initializer, b_initializer = ['eval_net_params', tf.GraphKeys.GLOBAL_VARIABLES], 10, tf.random_normal_initializer(0., 0.3), tf.constant_initializer(0.1)

            # first layer of eval_net; the collections are used when updating the target_net parameters
            with tf.variable_scope('l1'):
                w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names)  # collections is a list of graph-collection keys; the variable is added under each listed name
                b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names)
                l1 = tf.nn.relu(tf.matmul(self.s, w1) + b1)

            # second layer of eval_net; the collections are used when updating the target_net parameters
            with tf.variable_scope('l2'):
                w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names)
                b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names)
                self.q_eval = tf.matmul(l1, w2) + b2

        with tf.variable_scope('loss'):
            self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, self.q_eval))
        
        with tf.variable_scope('train'):
            self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)

        # ----- build target_net -----
        self.s_ = tf.placeholder(tf.float32, [None, self.n_features], name='s_')
        with tf.variable_scope('target_net'):
            # c_names(collections_names) are the collections to store variables
            c_names = ['target_net_params', tf.GraphKeys.GLOBAL_VARIABLES]

            # first layer; c_names (collections_names) are used when updating the target_net parameters
            with tf.variable_scope('l1'):
                w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names)
                b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names)
                l1 = tf.nn.relu(tf.matmul(self.s_, w1) + b1)
            
            # second layer; c_names (collections_names) are used when updating the target_net parameters
            with tf.variable_scope('l2'):
                w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names)
                b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names)
                self.q_next = tf.matmul(l1, w2) + b2
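
The custom collections above are what make the target-network update cheap. A sketch of the replacement ops they are typically consumed by (the op names here are assumptions):

        t_params = tf.get_collection('target_net_params')
        e_params = tf.get_collection('eval_net_params')
        self.replace_target_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]
        # later, inside a session: sess.run(self.replace_target_op)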
Example 8
    def add_projections(self, node_tensors):
        """Add projections to the composition vectors to compute the raw sentiment scores
        Hint: Reuse the "Projection" variable_scope here
        Args:
            node_tensors: tensor(?, embed_size)
        Returns:
            output: tensor(?, label_size)
        """
        logits = None
        ### YOUR CODE HERE
        with tf.Variable_scope("Projection", reuse=True):
            U = tf.get_variable("U")
            bs = tf.get_variable("bs")
        logits = tf.matmul(node_tensors, U) + bs

        ### END YOUR CODE
        return logits
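
reuse=True only works if the "Projection" scope was populated earlier. A sketch of the matching creation code, using the embed_size and label_size names from the docstring (where those are defined is an assumption):

with tf.variable_scope("Projection"):
    U = tf.get_variable("U", [embed_size, label_size])
    bs = tf.get_variable("bs", [1, label_size])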
Example 9
def conv_net(x_dict, n_classes, dropout, reuse, is_training):
    with tf.variable_scope('ConvNet', reuse=reuse):
        x = x_dict['images']
        x = tf.reshape(x, shape=[-1, 28, 28, 1])

        conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)
        conv1 = tf.layers.max_pooling2d(conv1, 2, 2)

        conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu)
        conv2 = tf.layers.max_pooling2d(conv2, 2, 2)

        fc1 = tf.contrib.layers.flatten(conv2)
        fc1 = tf.layers.dense(fc1, 1024)
        fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)

        out = tf.layers.dense(fc1, n_classes)

    return out
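
The reuse flag supports the usual tf.estimator pattern of building the graph twice over one set of weights, with dropout active only at training time (a sketch; the features dict and hyperparameters are assumptions):

logits_train = conv_net(features, n_classes=10, dropout=0.25,
                        reuse=False, is_training=True)
logits_test = conv_net(features, n_classes=10, dropout=0.25,
                       reuse=True, is_training=False)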
Example 10
def inception_v3_base(inputs,scope=None):
    # key intermediate activations are recorded in end_points
    end_points = {}
    with tf.variable_scope(scope,'InceptionV3',[inputs]):
        # use arg_scope to set default parameters for conv2d, max_pool2d and avg_pool2d
        with slim.arg_scope([slim.conv2d,slim.max_pool2d,slim.avg_pool2d],stride=1,padding="VALID"):
            # the stem: the part of the network not built from inception modules
            # argument order: (tensor, output channels, kernel size, stride, padding)
            # a 299x299x3 input comes out as 35x35x192: smaller spatially, more channels
            net = slim.conv2d(inputs,32,[3,3],stride=2,scope='Conv2d_1a_3x3')
            net = slim.conv2d(net,32,[3,3],scope='Conv2d_2a_3x3')
            net = slim.conv2d(net,64,[3,3],padding="SAME",scope='Conv2d_2b_3x3')
            net = slim.max_pool2d(net,[3,3],stride=2,scope="MaxPool_3a_3x3")
            net = slim.conv2d(net,80,[1,1],scope='Conv2d_3b_1x1')
            net = slim.conv2d(net,192,[3,3],scope="Conv2d_4a_3x3")
            net = slim.max_pool2d(net,[3,3],stride=2,scope="MaxPool_5a_3x3")
    with slim.arg_scope([slim.conv2d,slim.max_pool2d,slim.avg_pool2d],stride=1,padding='SAME'):
        with tf.variable_scope('Mixed_5b'):
            # inception module Mixed_5b has four branches, branch_0 through branch_3
            with tf.variable_scope('Branch_0'):
                branch_0 =slim.conv2d(net,64,[1,1],scope='Conv2d_0a_1x1')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(net,48,[1,1],scope='Conv2d_0a_1x1')
                branch_1 = slim.conv2d(branch_1,64,[5,5],scope='Conv2d_0b_5x5')
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.conv2d(net,64,[1,1],scope='Conv2d_0a_1x1')
                branch_2 = slim.conv2d(branch_2,96,[3,3],scope='Conv2d_0b_3x3')
                branch_2 = slim.conv2d(branch_2,96,[3,3],scope='Conv2d_0c_3x3')
            with tf.variable_scope('Branch_3'):
                branch_3 = slim.avg_pool2d(net,[3,3],scope='AvgPool_0a_3x3')
                branch_3 = slim.conv2d(branch_3,32,[1,1],scope='Conv2d_0b_1x1')
            # tf.concat merges the branches along the channel axis: 64+64+96+32 = 256 channels
            # the spatial size stays 35x35
            net = tf.concat([branch_0,branch_1,branch_2,branch_3],3)
        with tf.variable_scope('Mixed_5c'):
            with tf.variable_scope('Branch_0'):
                branch_0 = slim.conv2d(net,64,[1,1],scope='Conv2d_0a_1x1')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(net,48,[1,1],scope='Conv2d_0b_1x1')
                branch_1 = slim.conv2d(branch_1,64,[5,5],scope='Conv_1_0c_5x5')
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.conv2d(net,64,[1,1],scope='Conv2d_0a_1x1')
                branch_2 = slim.conv2d(branch_2,96,[3,3],scope='Conv2d_0b_3x3')
                branch_2 = slim.conv2d(branch_2,96,[3,3],scope='Conv2d_0c_3x3')
            with tf.variable_scope('Branch_3'):
                branch_3 = slim.avg_pool2d(net,[3,3],scope='AvgPool_0a_3x3')
                branch_3 = slim.conv2d(branch_3,64,[1,1],scope="Conv2d_0b_1x1")
            net = tf.concat([branch_0,branch_1,branch_2,branch_3],3)
        with tf.variable_scope('Mixed_5d'):
            with tf.variable_scope('Branch_0'):
                branch_0 = slim.conv2d(net,64,[1,1],scope='Conv2d_0a_1x1')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(net,48,[1,1],scope='Conv2d_0a_1x1')
                branch_1 = slim.conv2d(branch_1,64,[5,5],scope='Conv2d_0b_5x5')
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.conv2d(net,64,[1,1],scope='Conv2d_0a_1x1')
                branch_2 = slim.conv2d(branch_2,96,[3,3],scope='Conv2d_0b_3x3')
                branch_2 = slim.conv2d(branch_2,96,[3,3],scope='Conv2d_0c_3x3')
            with tf.variable_scope('Branch_3'):
                branch_3 = slim.avg_pool2d(net,[3,3],scope='AvgPool_0a_3x3')
                branch_3 = slim.conv2d(branch_3,64,[1,1],scope="Conv2d_0b_1x1")
            net = tf.concat([branch_0,branch_1,branch_2,branch_3],3)
        with tf.variable_scope('Mixed_6a'):
            with tf.variable_scope('Branch_0'):
                branch_0 = slim.conv2d(net,384,[1,1],stride=2,padding='VALID',scope='Conv2d_1a_1x1')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(net,64,[1,1],scope='Conv2d_0a_1x1')
                branch_1 = slim.conv2d(branch_1,96,[3,3],scope='Conv2d_0b_3x3')
                branch_1 = slim.conv2d(branch_1,96,[3,3],stride=2,padding='VALID',scope="Conv2d_1a_1x1")
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.max_pool2d(net,[3,3],stride=2,padding='VALID',scope='MaxPool_1a_1x1')
            net = tf.concat([branch_0,branch_1,branch_2],3) 
        with tf.variable_scope('Mixed_6b'):
            with tf.variable_scope('Branch_0'):
                branch_0 = slim.conv2d(net,192,[1,1],scope='Conv2d_0a_1x1')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(net,128,[1,1],scope='Conv2d_0a_1x1')
                branch_1 = slim.conv2d(branch_1,128,[1,7],scope='Conv2d_0b_1x7')
                branch_1 = slim.conv2d(branch_1,192,[7,1],scope='Conv2d_0c_7x1')
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.conv2d(net,128,[1,1],scope='Conv2d_0a_1x1')
                branch_2 = slim.conv2d(branch_2,128,[7,1],scope='Conv2d_0b_7x1')
                branch_2 = slim.conv2d(branch_2,128,[1,7],scope='Conv2d_0c_1x7')
                branch_2 = slim.conv2d(branch_2,128,[7,1],scope='Conv2d_0d_7x1')
                branch_2 = slim.conv2d(branch_2,192,[1,7],scope='Conv2d_0e_1x7')
            with tf.variable_scope('Branch_3'):
                branch_3 = slim.avg_pool2d(net,[3,3],scope="AvgPool_0a_3x3")
                branch_3 = slim.conv2d(branch_3,192,[1,1],scope='Conv2d_0b_1x1')
            net = tf.concat([branch_0,branch_1,branch_2,branch_3],3) 
        with tf.variable_scope('Mixed_6c'):
            with tf.variable_scope('Branch_0'):
                branch_0 = slim.conv2d(net,192,[1,1],scope='Conv2d_0a_1x1')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(net,160,[1,1],scope='Conv2d_0a_1x1')
                branch_1 = slim.conv2d(branch_1,160,[1,7],scope='Conv2d_0b_1x7')
                branch_1 = slim.conv2d(branch_1,192,[7,1],scope='Conv2d_0c_7x1')
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.conv2d(net,160,[1,1],scope='Conv2d_0a_1x1')
                branch_2 = slim.conv2d(branch_2,160,[7,1],scope='Conv2d_0b_7x1')
                branch_2 = slim.conv2d(branch_2,160,[1,7],scope='Conv2d_0c_1x7')
                branch_2 = slim.conv2d(branch_2,160,[7,1],scope='Conv2d_0d_7x1')
                branch_2 = slim.conv2d(branch_2,192,[1,7],scope='Conv2d_0e_1x7')
            with tf.variable_scope('Branch_3'):
                branch_3 = slim.avg_pool2d(net,[3,3],scope="AvgPool_0a_3x3")
                branch_3 = slim.conv2d(branch_3,192,[1,1],scope='Conv2d_0b_1x1')
            net = tf.concat([branch_0,branch_1,branch_2,branch_3],3)  
        with tf.variable_scope('Mixed_6d'):
            with tf.variable_scope('Branch_0'):
                branch_0 = slim.conv2d(net,192,[1,1],scope='Conv2d_0a_1x1')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(net,160,[1,1],scope='Conv2d_0a_1x1')
                branch_1 = slim.conv2d(branch_1,160,[1,7],scope='Conv2d_0b_1x7')
                branch_1 = slim.conv2d(branch_1,192,[7,1],scope='Conv2d_0c_7x1')
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.conv2d(net,128,[1,1],scope='Conv2d_0a_1x1')
                branch_2 = slim.conv2d(branch_2,160,[7,1],scope='Conv2d_0b_7x1')
                branch_2 = slim.conv2d(branch_2,160,[1,7],scope='Conv2d_0c_1x7')
                branch_2 = slim.conv2d(branch_2,160,[7,1],scope='Conv2d_0d_7x1')
                branch_2 = slim.conv2d(branch_2,192,[1,7],scope='Conv2d_0e_1x7')
            with tf.variable_scope('Branch_3'):
                branch_3 = slim.avg_pool2d(net,[3,3],scope="AvgPool_0a_3x3")
                branch_3 = slim.conv2d(branch_3,192,[1,1],scope='Conv2d_0b_1x1')
            net = tf.concat([branch_0,branch_1,branch_2,branch_3],3)       
        with tf.variable_scope('Mixed_6e'):
            with tf.variable_scope('Branch_0'):
                branch_0 = slim.conv2d(net,192,[1,1],scope='Conv2d_0a_1x1')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(net,192,[1,1],scope='Conv2d_0a_1x1')
                branch_1 = slim.conv2d(branch_1,192,[1,7],scope='Conv2d_0b_1x7')
                branch_1 = slim.conv2d(branch_1,192,[7,1],scope='Conv2d_0c_7x1')
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.conv2d(net,192,[1,1],scope='Conv2d_0a_1x1')
                branch_2 = slim.conv2d(branch_2,192,[7,1],scope='Conv2d_0b_7x1')
                branch_2 = slim.conv2d(branch_2,192,[1,7],scope='Conv2d_0c_1x7')
                branch_2 = slim.conv2d(branch_2,192,[7,1],scope='Conv2d_0d_7x1')
                branch_2 = slim.conv2d(branch_2,192,[1,7],scope='Conv2d_0e_1x7')
            with tf.variable_scope('Branch_3'):
                branch_3 = slim.avg_pool2d(net,[3,3],scope="AvgPool_0a_3x3")
                branch_3 = slim.conv2d(branch_3,192,[1,1],scope='Conv2d_0b_1x1')
            net = tf.concat([branch_0,branch_1,branch_2,branch_3],3) 
        end_points['Mixed_6e'] = net
        with tf.variable_scope('Mixed_7a'):
            with tf.variable_scope('Branch_0'):
                branch_0 = slim.conv2d(net,192,[1,1],scope='Conv2d_0a_1x1')
                branch_0 = slim.conv2d(branch_0,320,[3,3],stride=2,padding='VALID',scope='Conv2d_1a_3x3')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(net,192,[1,1],scope='Conv2d_0a_1x1')
                branch_1 = slim.conv2d(branch_1,192,[1,7],scope='Conv2d_0b_1x7')
                branch_1 = slim.conv2d(branch_1,192,[7,1],scope='Conv2d_0c_7x1')
                branch_1 = slim.conv2d(branch_1,192,[3,3],stride=2,padding='VALID',scope='Conv2d_1a_3x3')
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.max_pool2d(net,[3,3],stride=2,padding='VALID',scope='MaxPool_1a_3x3')
            net = tf.concat([branch_0,branch_1,branch_2],3) 
        with tf.variable_scope('Mixed_7b'):
            with tf.variable_scope('Branch_0'):
                branch_0 = slim.conv2d(net,320,[1,1],scope='Conv2d_0a_1x1')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(net,384,[1,1],scope='Conv2d_0a_1x1')
                branch_1 = tf.concat([
                    slim.conv2d(branch_1,384,[1,3],scope="Conv2d_0b_1x3"),
                    slim.conv2d(branch_1,384,[3,1],scope='Conv2d_0b_3x1')],3)
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.conv2d(net,448,[1,1],scope='Conv2d_0a_1x1')
                branch_2 = slim.conv2d(branch_2,384,[3,3],scope='Conv2d_0b_3x3')
                branch_2 = tf.concat([
                    slim.conv2d(branch_2,384,[1,3],scope='Conv2d_0c_1x3'),
                    slim.conv2d(branch_2,384,[3,1],scope="Conv2d_0d_3x1")],3)
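            # The example breaks off here, inside Mixed_7b. Based on the
            # complete version of this module in Example 14 below, Branch_3
            # and the final concat presumably read:
            with tf.variable_scope('Branch_3'):
                branch_3 = slim.avg_pool2d(net,[3,3],scope='AvgPool_0a_3x3')
                branch_3 = slim.conv2d(branch_3,192,[1,1],scope='Conv2d_0b_1x1')
            net = tf.concat([branch_0,branch_1,branch_2,branch_3],3)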
Example 11
def mobilenet(inputs,
              num_classes=1000,
              is_training=True,
              width_multiplier=1,
              scope='MobileNet'):
    '''
    Args:
        inputs: a tensor of size [batch_size,height,width,channels]
        num_classes: number of predicted classes
        is_training: model is being trained
        scope: scope for the variables
    Returns:
        logits
        end_points
    '''
    def _depthwise_separable_conv(inputs,
                                  num_pwc_filters,
                                  width_multiplier,
                                  sc,
                                  downsample=False):
        num_pwc_filters = round(num_pwc_filters * width_multiplier)
        _stride = 2 if downsample else 1

        # skip the pointwise stage by setting num_outputs=None
        depthwise_conv = slim.separable_convolution2d(inputs,
                                                      num_outputs=None,
                                                      stride=_stride,
                                                      depth_multiplier=1,
                                                      kernel_size=[3, 3],
                                                      scope=sc +
                                                      '/depthwise_conv')
        bn = slim.batch_norm(depthwise_conv, scope=sc + '/dw_batch_norm')
        pointwise_conv = slim.convolution2d(bn,
                                            num_pwc_filters,
                                            kernel_size=[1, 1],
                                            scope=sc + '/pointwise_conv')
        bn = slim.batch_norm(pointwise_conv, scope=sc + '/pw_batch_norm')
        return bn

    with tf.variable_scope(scope) as sc:
        end_points_collection = sc.name + '_end_points'
        with slim.arg_scope([slim.convolution2d, slim.separable_convolution2d],
                            activation_fn=None,
                            outputs_collections=[end_points_collection]):
            with slim.arg_scope([slim.batch_norm],
                                is_training=is_training,
                                activation_fn=tf.nn.relu,
                                fused=True):
                net = slim.convolution2d(inputs,
                                         round(32 * width_multiplier), [3, 3],
                                         stride=2,
                                         padding='SAME',
                                         scope='conv_1')
                net = slim.batch_norm(net, scope='conv_1/batch_norm')
                net = _depthwise_separable_conv(net,
                                                64,
                                                width_multiplier,
                                                sc='conv_ds_2')
                net = _depthwise_separable_conv(net,
                                                128,
                                                width_multiplier,
                                                downsample=True,
                                                sc='conv_ds_3')
                net = _depthwise_separable_conv(net,
                                                128,
                                                width_multiplier,
                                                sc='conv_ds_4')
                net = _depthwise_separable_conv(net,
                                                256,
                                                width_multiplier,
                                                downsample=True,
                                                sc='conv_ds_5')
                net = _depthwise_separable_conv(net,
                                                256,
                                                width_multiplier,
                                                sc='conv_ds_6')
                net = _depthwise_separable_conv(net,
                                                512,
                                                width_multiplier,
                                                downsample=True,
                                                sc='conv_ds_7')

                net = _depthwise_separable_conv(net,
                                                512,
                                                width_multiplier,
                                                sc='conv_ds_8')
                net = _depthwise_separable_conv(net,
                                                512,
                                                width_multiplier,
                                                sc='conv_ds_9')
                net = _depthwise_separable_conv(net,
                                                512,
                                                width_multiplier,
                                                sc='conv_ds_10')
                net = _depthwise_separable_conv(net,
                                                512,
                                                width_multiplier,
                                                sc='conv_ds_11')
                net = _depthwise_separable_conv(net,
                                                512,
                                                width_multiplier,
                                                sc='conv_ds_12')

                net = _depthwise_separable_conv(net,
                                                1024,
                                                width_multiplier,
                                                downsample=True,
                                                sc='conv_ds_13')
                net = _depthwise_separable_conv(net,
                                                1024,
                                                width_multiplier,
                                                sc='conv_ds_14')
                net = slim.avg_pool2d(net, [7, 7], scope='avg_pool_15')

        end_points = slim.utils.convert_collection_to_dict(
            end_points_collection)
        logits = slim.fully_connected(net,
                                      num_classes,
                                      activation_fn=None,
                                      scope='fc_16')
        predictions = slim.softmax(logits, scope='predictions')

        end_points['Logits'] = logits
        end_points['Predictions'] = predictions

    return logits, end_points
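
A minimal sketch of driving the network; the 224x224 input matches the final 7x7 pooling above, but the placeholder shape is an assumption:

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
logits, end_points = mobilenet(images, num_classes=1000, is_training=True)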
Example 12
    data_x = np.zeros([batch_size, batch_pation_length], dtype=np.int32)  # 200 * 50000
    data_y = np.zeros([batch_size, batch_pation_length], dtype=np.int32)
    for i in range(batch_size):
        data_x[i] = raw_x[batch_pation_length*i: batch_pation_length*(i+1)]
        data_y[i] = raw_y[batch_pation_length*i: batch_pation_length*(i+1)]
    epoch_size = batch_pation_length // num_step  # number of iterations per epoch
    for i in range(epoch_size):
        x = data_x[:, i*num_step:(i+1)*num_step]
        y = data_y[:, i*num_step:(i+1)*num_step]
        yield (x, y)

def gen_epochs(n, num_steps):
    for i in range(n):
        yield gen_batch(gen_data(),batch_size, num_step=num_steps)


# define the input/output format
x = tf.placeholder(tf.int32, [batch_size, num_steps], name='x')  # 200*5
y = tf.placeholder(tf.int32, [batch_size, num_steps], name='y')  # 200*5
init_state = tf.zeros([batch_size, state_size])  # initial state, 200*4
# RNN input
x_one_hot = tf.one_hot(x, num_class)
rnn_input = tf.unstack(x_one_hot, axis=1)
# RNN cell variables
with tf.variable_scope('rnn_cell'):
    W = tf.get_variable('W', [num_class + state_size, state_size])
    b = tf.get_variable('b', [state_size], initializer=tf.constant_initializer(0.0))
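
The 'rnn_cell' scope above only creates W and b; in this style of tutorial the per-step cell function then re-enters the scope with reuse=True, presumably along these lines (a sketch, not code from the snippet):

def rnn_cell(rnn_input, state):
    with tf.variable_scope('rnn_cell', reuse=True):
        W = tf.get_variable('W', [num_class + state_size, state_size])
        b = tf.get_variable('b', [state_size])
    return tf.tanh(tf.matmul(tf.concat([rnn_input, state], 1), W) + b)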
    



Example 13
x = tf.placeholder(shape=[batch_size, word_size], dtype=tf.int32)
y_ = tf.placeholder(shape=[batch_size], dtype=tf.float32)

embeddings = tf.constant(embeddings)
embed = tf.nn.embedding_lookup(embeddings, x)

gru = tf.nn.rnn_cell.GRUCell(num_units=16,
                             reuse=tf.AUTO_REUSE,
                             activation=tf.nn.elu)
state = gru.zero_state(batch_size, dtype=tf.float32)
lis = []
with tf.variable_scope('RNN'):
    for timestep in range(word_size):
        if timestep == 1:
            tf.get_variable_scope().reuse_variables()
        (cell_output, state) = gru(embed[:, timestep], state)
    out_put = state

lay1 = tf.nn.elu(ml.layer_basic(out_put, 4))
lay2 = ml.layer_basic(ml.bn_with_wb(lay1), 1)
y = tf.nn.sigmoid(lay2[:, 0])
loss = tf.reduce_sum(-y_ * tf.log(y + 0.000000001) - (1 - y_) *
                     tf.log(1 - y + 0.00000001)) / batch_size / tf.log(2.0)
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
# ...................................................................
sess = tf.Session()
sess.run(tf.global_variables_initializer())

print('begin..................................', sum(y_test) / len(y_test))
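
A hedged sketch of the training loop that would follow; next_batch is a hypothetical batching helper, not part of the snippet:

for step in range(1000):
    batch_x, batch_y = next_batch(batch_size)  # hypothetical helper
    _, batch_loss = sess.run([optimizer, loss],
                             feed_dict={x: batch_x, y_: batch_y})
    if step % 100 == 0:
        print(step, batch_loss)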
Example 14
def inception_v3_base(inputs, scope=None):

    end_points = {}
    with tf.variable_scope(scope, 'InceptionV3', [inputs]):
        with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                            stride=1,
                            padding='VALID'):
            net = slim.conv2d(inputs,
                              32, [3, 3],
                              stride=2,
                              scope='Conv2d_1a_3x3')
            net = slim.conv2d(net, 32, [3, 3], scope='Conv2d_2a_3x3')
            net = slim.conv2d(net,
                              64, [3, 3],
                              padding='SAME',
                              scope='Conv2d_2b_3x3')
            net = slim.max_pool2d(net, [3, 3],
                                  stride=2,
                                  scope='MaxPool_3a_3x3')
            net = slim.conv2d(net, 80, [1, 1], scope='Conv2d_3b_1x1')
            net = slim.conv2d(net, 192, [3, 3], scope='Conv2d_4a_3x3')
            net = slim.max_pool2d(net, [3, 3], stride=2, scope='MaxPool_5a_3x3')

####
# 4 branches, numbered 0-3:
# 0. 64-filter 1x1 conv
# 1. 48-filter 1x1 conv, then 64-filter 5x5 conv
# 2. 64-filter 1x1 conv, then two 96-filter 3x3 convs
# 3. 3x3 avg pool, then 32-filter 1x1 conv
# tf.concat merges the outputs along the channel axis:
# 64+64+96+32 = 256
# output is 35x35x256
# every module in inception block 1 keeps the 35x35 spatial size;
# only the last two modules change the number of channels
####
        with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                            stride=1,
                            padding='SAME'):
            with tf.variable_scope('Mixed_5d'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net,
                                           64, [1, 1],
                                           scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net,
                                           48, [1, 1],
                                           scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1,
                                           64, [5, 5],
                                           scope='Conv2d_0b_5x5')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net,
                                           64, [1, 1],
                                           scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2,
                                           96, [3, 3],
                                           scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2,
                                           96, [3, 3],
                                           scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3],
                                               scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3,
                                           32, [1, 1],
                                           scope='Conv2d_0b_1x1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)

#####
###
# Inception block 2
###
#####
#
# 5 inception modules are included.
#
# The 1st module is named Mixed_6a and has 3 branches:
# 1. 384-filter 3x3 conv, stride 2, padding VALID -> spatial size 17x17
# 2. a 64-filter 1x1 conv and two 96-filter 3x3 convs, the last one
#    with stride 2 and padding VALID -> 17x17x96
# 3. 3x3 max pool, stride 2, padding VALID -> 17x17x256
#
# tf.concat combines the 3 branches into one, so the final output is
# 17x17x(384+96+256) = 17x17x768.
# In the remaining modules of this block the spatial size and channel
# count stay unchanged.
#
#
            with tf.variable_scope('Mixed_6a'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net,
                                           384, [3, 3],
                                           stride=2,
                                           padding='VALID',
                                           scope='Conv2d_1a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net,
                                           64, [1, 1],
                                           scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1,
                                           96, [3, 3],
                                           scope='Conv2d_0b_3x3')
                    branch_1 = slim.conv2d(branch_1,
                                           96, [3, 3],
                                           stride=2,
                                           padding='VALID',
                                           scope='Conv2d_1a_1x1')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.max_pool2d(net, [3, 3],
                                               stride=2,
                                               padding='VALID',
                                               scope='MaxPool_1a_3x3')
                net = tf.concat([branch_0, branch_1, branch_2], 3)

#######
# Inception block 2 (continued)
####
# 2nd module -- Mixed_6b
# 4 branches:
# 1. 192-filter 1x1 conv
# 2. 3 convs: 128 1x1, 128 1x7, 192 7x1 -- factorization into small
#    convolutions, which cuts the parameter count and adds one more
#    activation (extra non-linearity)
# 3. 5 convs: 128 1x1, 128 7x1, 128 1x7, 128 7x1, 192 1x7
# 4. 3x3 avg pool, then 192-filter 1x1 conv
# tf.concat
# tensor size = 17x17x(192+192+192+192) = 17x17x768
#######
            with tf.variable_scope('Mixed_6b'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net,
                                           192, [1, 1],
                                           scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net,
                                           128, [1, 1],
                                           scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1,
                                           128, [1, 7],
                                           scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1,
                                           192, [7, 1],
                                           scope='Conv2d_0c_7x1')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net,
                                           128, [1, 1],
                                           scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2,
                                           128, [7, 1],
                                           scope='Conv2d_0b_7x1')
                    branch_2 = slim.conv2d(branch_2,
                                           128, [1, 7],
                                           scope='Conv2d_0c_1x7')
                    branch_2 = slim.conv2d(branch_2,
                                           128, [7, 1],
                                           scope='Conv2d_0d_7x1')
                    branch_2 = slim.conv2d(branch_2,
                                           192, [1, 7],
                                           scope='Conv2d_0e_1x7')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3],
                                               scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3,
                                           192, [1, 1],
                                           scope='Conv2d_0b_1x1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
#####
# Inception module -- Mixed_6c
#####
# Similar to Mixed_6b; only the 2nd and 3rd branches use different
# channel counts (160 instead of 128), while the final output of each
# branch is unchanged at 192.
# Everything else is the same.
# Each inception module the tensor passes through keeps its size but
# refines the features once more; the varied convolutions and
# non-linearities contribute a lot to the performance.
#####
            with tf.variable_scope('Mixed_6c'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net,
                                           192, [1, 1],
                                           scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net,
                                           160, [1, 1],
                                           scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1,
                                           160, [1, 7],
                                           scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1,
                                           192, [7, 1],
                                           scope='Conv2d_0c_7x1')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net,
                                           160, [1, 1],
                                           scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2,
                                           160, [7, 1],
                                           scope='Conv2d_0b_7x1')
                    branch_2 = slim.conv2d(branch_2,
                                           160, [1, 7],
                                           scope='Conv2d_0c_1x7')
                    branch_2 = slim.conv2d(branch_2,
                                           160, [7, 1],
                                           scope='Conv2d_0d_7x1')
                    branch_2 = slim.conv2d(branch_2,
                                           192, [1, 7],
                                           scope='Conv2d_0e_1x7')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3],
                                               scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3,
                                           192, [1, 1],
                                           scope='Conv2d_0b_1x1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
####
# Inception Module --Mixed_6d
###
# the same to the --Mixed_6c
####
            with tf.variable_scope('Mixed_6d'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net,
                                           192, [1, 1],
                                           scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net,
                                           160, [1, 1],
                                           scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1,
                                           160, [1, 7],
                                           scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1,
                                           192, [7, 1],
                                           scope='Conv2d_0c_7x1')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net,
                                           160, [1, 1],
                                           scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2,
                                           160, [7, 1],
                                           scope='Conv2d_0b_7x1')
                    branch_2 = slim.conv2d(branch_2,
                                           160, [1, 7],
                                           scope='Conv2d_0c_1x7')
                    branch_2 = slim.conv2d(branch_2,
                                           160, [7, 1],
                                           scope='Conv2d_0d_7x1')
                    branch_2 = slim.conv2d(branch_2,
                                           192, [1, 7],
                                           scope='Conv2d_0e_1x7')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3],
                                               scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3,
                                           192, [1, 1],
                                           scope='Conv2d_0b_1x1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)

############
## Mixed_6e
##########
## same structure as the modules before it
## This is the last module of inception block 2; we record Mixed_6e in
## end_points to serve as the input of the Auxiliary Classifier.
###########

            with tf.variable_scope('Mixed_6e'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net,
                                           192, [1, 1],
                                           scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net,
                                           192, [1, 1],
                                           scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1,
                                           192, [1, 7],
                                           scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1,
                                           192, [7, 1],
                                           scope='Conv2d_0c_7x1')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net,
                                           192, [1, 1],
                                           scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2,
                                           192, [7, 1],
                                           scope='Conv2d_0b_7x1')
                    branch_2 = slim.conv2d(branch_2,
                                           192, [1, 7],
                                           scope='Conv2d_0c_1x7')
                    branch_2 = slim.conv2d(branch_2,
                                           192, [7, 1],
                                           scope='Conv2d_0d_7x1')
                    branch_2 = slim.conv2d(branch_2,
                                           192, [1, 7],
                                           scope='Conv2d_0e_1x7')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3],
                                               scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3,
                                           192, [1, 1],
                                           scope='Conv2d_0b_1x1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
            end_points['Mixed_6e'] = net
            ###########################################
            ##
            ## Inception block 3
            # 3 inception modules; the last two are similar.
            # The 1st is named Mixed_7a, with 3 branches:
            # 1. 192-filter 1x1 conv, then 320-filter 3x3 conv with
            #    stride=2, padding=VALID
            # 2. 192 1x1, 192 1x7, 192 7x1, then 192 3x3 with stride=2,
            #    padding=VALID; the output is 8x8x192
            # 3. 3x3 max pool with stride=2, padding=VALID; output is 8x8x768
            # tf.concat combines the channels: the output tensor size is
            # 8x8x(320+192+768) = 8x8x1280
            # From this module on, the spatial size shrinks and the channel
            # count grows, so the overall tensor size keeps dropping.
            ##########################################

            with tf.variable_scope('Mixed_7a'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net,
                                           192, [1, 1],
                                           scope='Conv2d_0a_1x1')
                    branch_0 = slim.conv2d(branch_0,
                                           320, [3, 3],
                                           stride=2,
                                           padding='VALID',
                                           scope='Conv2d_1a_3x3')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net,
                                           192, [1, 1],
                                           scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1,
                                           192, [1, 7],
                                           scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1,
                                           192, [7, 1],
                                           scope='Conv2d_0c_7x1')
                    branch_1 = slim.conv2d(branch_1,
                                           192, [3, 3],
                                           stride=2,
                                           padding='VALID',
                                           scope='Conv2d_1a_3x3')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.max_pool2d(net, [3, 3],
                                               stride=2,
                                               padding='VALID',
                                               scope='MaxPool_1a_3x3')
                net = tf.concat([branch_0, branch_1, branch_2], 3)

########################################
##
# Inception block 3, 2nd module
# 4 branches:
# 1. 320-filter 1x1 conv
# 2. 384-filter 1x1 conv, then two parallel branches (384 1x3 and
#    384 3x1) merged by tf.concat, so this branch outputs
#    8x8x(384+384) = 8x8x768
# 3. 448-filter 1x1 conv, then 384 3x3, then two parallel branches
#    (384 1x3 and 384 3x1) combined into 8x8x768
# 4. 3x3 avg pool, then 192-filter 1x1 conv
# tf.concat over the 4 branches: the tensor size is
# 8x8x(320+768+768+192) = 8x8x2048, i.e. 2048 output channels
#######################################

            with tf.variable_scope('Mixed_7b'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net,
                                           320, [1, 1],
                                           scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net,
                                           384, [1, 1],
                                           scope='Conv2d_0a_1x1')
                    branch_1 = tf.concat([
                        slim.conv2d(
                            branch_1, 384, [1, 3], scope='Conv2d_0b_1x3'),
                        slim.conv2d(
                            branch_1, 384, [3, 1], scope='Conv2d_0b_3x1')
                    ], 3)
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net,
                                           448, [1, 1],
                                           scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2,
                                           384, [3, 3],
                                           scope='Conv2d_0b_3x3')
                    branch_2 = tf.concat([
                        slim.conv2d(
                            branch_2, 384, [1, 3], scope='Conv2d_0c_1x3'),
                        slim.conv2d(
                            branch_2, 384, [3, 1], scope='Conv2d_0d_3x1')
                    ], 3)
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3],
                                               scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3,
                                           192, [1, 1],
                                           scope='Conv2d_0b_1x1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)

#####################
# Mixed_7c is the last inception module, a copy of Mixed_7b;
# its output size is also 8x8x2048.
#####
# Its output is what inception_v3_base returns as the final result.
#####################

            with tf.variable_scope('Mixed_7c'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net,
                                           320, [1, 1],
                                           scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net,
                                           384, [1, 1],
                                           scope='Conv2d_0a_1x1')
                    branch_1 = tf.concat([
                        slim.conv2d(
                            branch_1, 384, [1, 3], scope='Conv2d_0b_1x3'),
                        slim.conv2d(
                            branch_1, 384, [3, 1], scope='Conv2d_0c_3x1')
                    ], 3)
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net,
                                           448, [1, 1],
                                           scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2,
                                           384, [3, 3],
                                           scope='Conv2d_0b_3x3')
                    branch_2 = tf.concat([
                        slim.conv2d(
                            branch_2, 384, [1, 3], scope='Conv2d_0c_1x3'),
                        slim.conv2d(
                            branch_2, 384, [3, 1], scope='Conv2d_0d_3x1')
                    ], 3)
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3],
                                               scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3,
                                           192, [1, 1],
                                           scope='Conv2d_0b_1x1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
            return net, end_points
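
A short sketch of exercising the base network; per the comments above, a 299x299x3 input yields an 8x8x2048 final tensor and a 17x17x768 Mixed_6e entry (the placeholder shape is an assumption):

inputs = tf.placeholder(tf.float32, [None, 299, 299, 3])
net, end_points = inception_v3_base(inputs)
print(net.get_shape())                     # (?, 8, 8, 2048)
print(end_points['Mixed_6e'].get_shape())  # (?, 17, 17, 768)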