Example #1
def generator(z):
    with tf.variable_scope("generator", reuse=tf.AUTO_REUSE):

        with tf.variable_scope("linear"):
            linear = clayers.fully_connected(z, 1024 * 4 * 4)

        with tf.variable_scope("conv1_transp"):
            # Reshape as 4x4 images
            conv1 = tf.reshape(linear, (-1, 4, 4, 1024))
            conv1 = default_conv2d_transpose(conv1, 512)
            conv1 = layers.batch_normalization(conv1)
            conv1 = nn.relu(conv1)

        with tf.variable_scope("conv2_transp"):
            conv2 = default_conv2d_transpose(conv1, 256)
            conv2 = layers.batch_normalization(conv2)
            conv2 = nn.relu(conv2)

        with tf.variable_scope("conv3_transp"):
            conv3 = default_conv2d_transpose(conv2, 128)
            conv3 = layers.batch_normalization(conv3)
            conv3 = nn.relu(conv3)

        with tf.variable_scope("conv4_transp"):
            conv4 = default_conv2d_transpose(conv3, 3)

        with tf.variable_scope("out"):
            out = tf.tanh(conv4)
    return out
Example #2
    def setUp(self):
        super(AxiomsTest, self).setUp()

        # Make a linear model for testing.
        graph_lin = Graph()

        with graph_lin.as_default():
            x_lin = placeholder('float32', (None, self.input_size))
            y_lin = x_lin @ self.model_lin_weights + self.model_lin_bias

        self.model_lin = ModelWrapper(graph_lin, x_lin, y_lin)

        # Make a deeper model for testing.
        graph_deep = Graph()

        with graph_deep.as_default():
            x_deep = placeholder('float32', (None, self.input_size))
            z1_deep = (x_deep @ self.model_deep_weights_1 +
                       self.model_deep_bias_1)
            z2_deep = relu(z1_deep)
            z3_deep = (z2_deep @ self.model_deep_weights_2 +
                       self.model_deep_bias_2)
            z4_deep = relu(z3_deep)
            y_deep = (z4_deep @ self.model_deep_weights_3 +
                      self.model_deep_bias_3)

        self.model_deep = ModelWrapper(graph_deep, x_deep, y_deep,
                                       dict(layer2=z2_deep, layer3=z3_deep))

        self.layer2 = 'layer2'
        self.layer3 = 'layer3'
Example #3
def cnn_graph(x, keep_prob, size, captcha_list=CAPTCHA_LIST, captcha_len=CAPTCHA_LEN):
    """Builds a three-block CNN followed by a fully connected layer and the output logits."""
    x_image = reshape(x, shape=[-1, size[0], size[1], 1])
    # Convolution block 1: 3x3 conv, ReLU, 2x2 max pooling, dropout
    w_conv1 = weight_variable([3, 3, 1, 32])
    b_conv1 = bias_variable([32])
    h_conv1 = relu(conv2d(x_image, w_conv1) + b_conv1)
    h_pool1 = max_pool2d(h_conv1)
    h_drop1 = dropout(h_pool1, rate=1 - keep_prob)
    # Convolution block 2
    w_conv2 = weight_variable([3, 3, 32, 64])
    b_conv2 = bias_variable([64])
    h_conv2 = relu(conv2d(h_drop1, w_conv2) + b_conv2)
    h_pool2 = max_pool2d(h_conv2)
    h_drop2 = dropout(h_pool2, rate=1 - keep_prob)
    # Convolution block 3
    w_conv3 = weight_variable([3, 3, 64, 64])
    b_conv3 = bias_variable([64])
    h_conv3 = relu(conv2d(h_drop2, w_conv3) + b_conv3)
    h_pool3 = max_pool2d(h_conv3)
    h_drop3 = dropout(h_pool3, rate=1 - keep_prob)
    # Fully connected layer on the flattened feature maps
    image_height = int(h_drop3.shape[1])
    image_width = int(h_drop3.shape[2])
    w_fc = weight_variable([image_height * image_width * 64, 1024])
    b_fc = bias_variable([1024])
    h_drop3_re = reshape(h_drop3, [-1, image_height * image_width * 64])
    h_fc = relu(matmul(h_drop3_re, w_fc) + b_fc)
    h_drop_fc = dropout(h_fc, rate=1 - keep_prob)
    # Output layer: one logit per (character position, character class) pair
    w_out = weight_variable([1024, len(captcha_list) * captcha_len])
    b_out = bias_variable([len(captcha_list) * captcha_len])
    y_conv = matmul(h_drop_fc, w_out) + b_out
    return y_conv
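The helpers used in Example #3 (weight_variable, bias_variable, conv2d, max_pool2d) are not shown; relu, dropout, reshape and matmul are presumably tf.nn.relu, tf.nn.dropout, tf.reshape and tf.matmul. A minimal sketch of the kind of TF1-style definitions they likely correspond to (an assumption, not the original project's code):

def weight_variable(shape):
    # Weights initialized from a small truncated normal distribution
    return tf.Variable(tf.truncated_normal(shape, stddev=0.01))

def bias_variable(shape):
    # Biases initialized to a small positive constant
    return tf.Variable(tf.constant(0.1, shape=shape))

def conv2d(x, w):
    # Stride-1 convolution with zero padding that preserves the spatial size
    return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')

def max_pool2d(x):
    # 2x2 max pooling with stride 2, halving height and width
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')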
Example #4
def generator(z, reuse=tf.AUTO_REUSE):
    with tf.variable_scope('gen', reuse=reuse):
        with tf.variable_scope("linear"):
            linear = clayers.fully_connected(z, 128 * 2 * 2)

        with tf.variable_scope("conv1_transp"):
            # Reshape as 2x2 images
            conv1 = tf.reshape(linear, (-1, 2, 2, 128))
            conv1 = default_conv2d_transpose(conv1, 64)
            conv1 = nn.relu(conv1)

        with tf.variable_scope("conv2_transp"):
            conv2 = default_conv2d_transpose(conv1, 32)
            conv2 = nn.relu(conv2)

        with tf.variable_scope("conv3_transp"):
            conv3 = default_conv2d_transpose(conv2, 16)
            conv3 = nn.relu(conv3)

        with tf.variable_scope("conv4_transp"):
            conv4 = default_conv2d_transpose(conv3, 1)

        with tf.variable_scope("out"):
            out = tf.tanh(conv4)
        return out
Example #5
    def call(self, inputs):

        #Layer 1
        Z_bottleneck = self.bottleneck(inputs)
        Z_bottleneck = self.bn_bottleneck(Z_bottleneck)
        Z_bottleneck = relu(Z_bottleneck)

        Z_maxpool = self.max_pool(inputs)

        #Layer 2
        Z1 = self.conv_f1(Z_maxpool)
        Z1 = self.bn_f1(Z1)
        Z1 = relu(Z1)

        Z2 = self.conv_f10(Z_bottleneck)
        Z2 = self.bn_f10(Z2)
        Z2 = relu(Z2)

        Z3 = self.conv_f20(Z_bottleneck)
        Z3 = self.bn_f20(Z3)
        Z3 = relu(Z3)

        Z4 = self.conv_f40(Z_bottleneck)
        Z4 = self.bn_f40(Z4)
        Z4 = relu(Z4)

        #Layer 3
        Z = Concatenate()([Z1, Z2, Z3, Z4])
        return Z
Example #6
def discriminator(x, alpha=0.01, reuse=tf.AUTO_REUSE):
    with tf.variable_scope('dis', reuse=reuse):
        with tf.variable_scope("conv1"):
            conv1 = default_conv2d(x, 16)
            conv1 = nn.relu(conv1)

        with tf.variable_scope("conv2"):
            conv2 = default_conv2d(conv1, 32)
            conv2 = nn.relu(conv2)

        with tf.variable_scope("conv3"):
            conv3 = default_conv2d(conv2, 64)
            conv3 = nn.relu(conv3)

        with tf.variable_scope("conv4"):
            conv4 = default_conv2d(conv3, 128)
            conv4 = nn.relu(conv4)

        with tf.variable_scope("linear"):
            linear = clayers.flatten(conv4)
            linear = clayers.fully_connected(linear, 1)

        with tf.variable_scope("out"):
            out = nn.sigmoid(linear)
    return out
Example #7
    def __init__(self, sparse, guidance_map, params, reuse=False):
        with tf.variable_scope("Local", reuse=reuse) as scope:
            sparse = tf.reshape(sparse, [-1, 200, 200, 1])
            guidance_map = tf.reshape(guidance_map, [-1, 200, 200, 1])
            x = sparse + guidance_map

            x = conv2d(x, 32, (3, 3), strides=(2, 2), padding='same')
            x = relu(x)
            x = conv2d(x, 64, (3, 3), strides=(2, 2), padding='same')
            x = relu(x)
            x = conv2d_transpose(x,
                                 64, (5, 5),
                                 strides=(2, 2),
                                 padding='same',
                                 use_bias=False)
            x = batch_normalization(x)
            x = relu(x)
            x = conv2d_transpose(x,
                                 2, (5, 5),
                                 strides=(2, 2),
                                 padding='same',
                                 use_bias=False)
            self.output = x
        self.parameters = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                            scope="Local")
Example #8
    def build(self, x, reuse=None):
        """ TODO: define your model (2 conv layers and 2 fc layers?)
        x: input image
        logit: network output w/o softmax """
        with tf.variable_scope('model', reuse=reuse):

            W1 = tf.Variable(tf.random_normal([3, 3, 1, 32], stddev=0.01))

            L1 = relu(conv2d(x, W1, strides=[1, 1, 1, 1], padding='SAME'))
            L1 = max_pool(L1, ksize=[1, 2, 2, 1],
                            strides=[1, 2, 2, 1], padding='SAME')

            W2 = tf.Variable(tf.random_normal([3, 3, 32, 64], stddev=0.01))
            L2 = relu(conv2d(L1, W2, strides=[1, 1, 1, 1], padding='SAME'))
            L2 = max_pool(L2, ksize=[1, 2, 2, 1],
                            strides=[1, 2, 2, 1], padding='SAME')

            L2_flat = tf.reshape(L2, [-1, 7 * 7 * 64])
            W3 = tf.get_variable("W3", shape=[7 * 7 * 64, 10],
                                 initializer=xavier_initializer())
            b = tf.Variable(tf.random_normal([10]))

            logit = tf.matmul(L2_flat, W3) + b


        return logit
Example #9
 def __init__(self, images, sparse, params, reuse=False):
     with tf.variable_scope("Global", reuse=reuse) as scope:
         sparse = tf.reshape(sparse, [-1, 200, 200, 1])
         images = tf.reshape(images, [-1, 200, 200, 3])
         x = tf.concat([images, sparse], axis=3)
         x = tf.cast(x, dtype=tf.float32)
         x = conv2d(x, 32, (3, 3), strides=(2, 2), padding='same')
         x = relu(x)
         x = conv2d(x, 64, (3, 3), strides=(2, 2), padding='same')
         x = relu(x)
         x = conv2d_transpose(x,
                              64, (5, 5),
                              strides=(2, 2),
                              padding='same',
                              use_bias=False)
         x = batch_normalization(x)
         x = relu(x)
         x = conv2d_transpose(x,
                              3, (5, 5),
                              strides=(2, 2),
                              padding='same',
                              use_bias=False)
         self.output = x
     self.parameters = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                         scope="Global")
Example #10
 def call(self, x, mask=None):
     x = self.lstm0(x, mask=mask, training=True)
     x = relu(x)
     x = self.lstm1(x, mask=mask, training=True)
     x = relu(x)
     x = self.dense(x)
     return x
Example #11
    def __call__(self,
                 state,
                 inputs=None,
                 scope=None):  # inputs are observations
        """
        Basic HCNN with Architectural Teacher-Forcing (ATF): new_state = A * tanh( state - diff([id 0]*state,expand(inputs)) ).
        : expand(.) zero pads vector to same size as state

        state: dim [depth,1,n]
        inputs: dim [depth,1,m]
        output: dim [depth,1,m]
        """

        # Normalize the observations to a 3-D tensor of shape [depth, 1, m].
        if inputs is not None:
            in_shape = inputs.get_shape()
            in_len = len(in_shape)
            if in_len == 3:
                obs = inputs
            if in_len <= 2:
                obs = tf.expand_dims(inputs, 0)
            if in_len == 1:
                obs = tf.expand_dims(obs, 0)
            if self._output_size is None:
                self._output_size = in_shape[2].value
            elif self._output_size != in_shape[2].value:
                raise ValueError(
                    "output_size and input shape are inconsistent.")

        if (self._output_size is None):
            raise ValueError("Output_size is ill defined.")

        if len(state.get_shape()) < 3:
            raise ValueError("State should have 3 dimensions.")

        with vs.variable_scope(
                scope or type(self).__name__) as cell_scope:  # "BasicHCNNCell"
            # returns first output_size num elems from state as tensor
            output = state[:, :, :self._output_size]

            if inputs is not None:
                # zero pads the difference between output and obs to be of the same size as state
                padding = [[0, 0], [0, 0],
                           [0, self._num_units - self._output_size]]
                tar = tf.pad((tf.subtract(output, obs)), padding)
                # get new state.
                new_state = _linear_cube(relu(state - tar),
                                         self._num_units,
                                         sparse_cube=self._sparse_cube,
                                         scope=cell_scope)
            else:
                # When there is no teacher forcing, as with forecast.
                new_state = _linear_cube(relu(state),
                                         self._num_units,
                                         sparse_cube=self._sparse_cube,
                                         scope=cell_scope)
        return new_state, output
Example #12
    def call(self, x, training=True):
        y = nn.relu(self.max_pool(self.conv1(x)))
        y = nn.relu(self.max_pool(self.dropout(self.conv2(y), training=training)))

        # Flatten feature matrix
        batch_size = y.shape[0]
        y = tf.reshape(y, (batch_size, -1))

        y = self.dense(y)
        return y
Example #13
    def margin_loss(self, x, labels, size_average=True):
        batch_size = tf.shape(x)[0]

        # Length of each capsule output vector
        v_c = tf.math.sqrt(tf.reduce_sum(x**2, axis=2, keepdims=True))

        left = tf.reshape(nn.relu(0.9 - v_c), (batch_size, -1))
        right = tf.reshape(nn.relu(v_c - 0.1), (batch_size, -1))

        loss = labels * left + 0.5 * (1.0 - labels) * right
        loss = tf.reduce_mean(tf.reduce_sum(loss, axis=1))

        return loss
Example #14
    def call(self, input_tensor):
        x = nn.relu(input_tensor)
        x = self.conv2a(x)

        x = nn.relu(x)
        x = self.conv2b(x)

        x = nn.relu(x)
        x = self.conv2c(x)

        x += input_tensor
        return x
Example #15
    def create_model(self,
                     model_input,
                     vocab_size,
                     l2_penalty=1e-8,
                     is_training=True,
                     input_size=1024 + 128,
                     **unused_params):
        """Creates a Multi Layered Perceptron model.
    """
        # General transform layer
        fc1 = slim.fully_connected(
            model_input,
            input_size,
            activation_fn=None,
            weights_regularizer=slim.l2_regularizer(l2_penalty),
            scope='fc1')
        bn1 = slim.batch_norm(fc1, is_training=is_training, scope='bn1')
        relu1 = nn.relu(bn1, name='relu1')

        # Coarse classification
        coarse_scores = slim.fully_connected(
            relu1,
            25,
            activation_fn=None,
            weights_regularizer=slim.l2_regularizer(l2_penalty),
            scope='coarse')

        # Concatenate the coarse scores with the transformed features
        concat = tf.concat([relu1, coarse_scores], -1, name='concat')

        # Specific transform layer
        fc2 = slim.fully_connected(
            concat,
            input_size + 25,
            activation_fn=None,
            weights_regularizer=slim.l2_regularizer(l2_penalty),
            scope='fc2')
        bn2 = slim.batch_norm(fc2, is_training=is_training, scope='bn2')
        relu2 = nn.relu(bn2, name='relu2')

        # Final classifier
        classifier = slim.fully_connected(
            relu2,
            vocab_size,
            activation_fn=None,
            weights_regularizer=slim.l2_regularizer(l2_penalty),
            scope='classifier')

        final_probs = nn.sigmoid(classifier, name='final_probs')
        coarse_probs = nn.sigmoid(coarse_scores, name='coarse_probs')

        return {"predictions": final_probs, "coarse_predictions": coarse_probs}
Example #16
    def call(self, inputs):
        residual = self.downsample(inputs)
        x = self.conv1(inputs)
        x = self.bn1(x)
        x = nn.relu(x)

        x = self.conv2(x)
        x = self.bn2(x)
        
        x = tf.add(x, residual)
        out = nn.relu(x)

        return out
Example #17
 def call(self, inputs, training=False, mask=None):
     x = self.conv1(inputs)
     x = self.bn1(x)
     x = relu(x)
     x = self.pool1(x)
     x = self.conv2(x)
     x = self.bn2(x)
     x = relu(x)
     x = self.pool2(x)
     x = flatten(x)
     x = self.fc1(x)
     x = relu(x)
     x = self.fc2(x)
     return x
Example #18
def build_cnn(feat_dim=(1024, 14, 14),
              res_block_dim=128,
              num_res_blocks=0,
              proj_dim=512,
              pooling='maxpool2'):
    C, H, W = feat_dim
    layers = []
    if num_res_blocks > 0:
        layers.append(
            tf.keras.layers.Conv2D(res_block_dim,
                                   kernel_size=(3, 3),
                                   padding='same'))
        layers.append(tf.keras.layers.ReLU())
        C = res_block_dim
        for _ in range(num_res_blocks):
            layers.append(ResidualBlock(C))
    if proj_dim > 0:
        layers.append(
            tf.keras.layers.Conv2D(proj_dim, kernel_size=(1, 1), padding='valid'))
        layers.append(tf.keras.layers.ReLU())
        C = proj_dim
    if pooling == 'maxpool2':
        layers.append(tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=2))
        H, W = H // 2, W // 2

    model = tf.keras.Sequential()
    for layer in layers:
        model.add(layer)
    return model, (C, H, W)
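A brief usage sketch for build_cnn with its default arguments (a hypothetical call, not taken from the original project):

model, (C, H, W) = build_cnn(feat_dim=(1024, 14, 14))
# With the defaults no residual blocks are added; the 1x1 projection gives C == 512
# and the 2x2 max pooling gives H == W == 7.
features = model(tf.random.normal((8, 14, 14, 1024)))  # channels-last batch -> shape (8, 7, 7, 512)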
Example #19
File: DeepQ.py Project: cjjun/DRL
def forward_propogation_sub_network(terminal, parameters, params):
    """
    Forward propagation for each sub-network. Must indicate the network layer id.

    Args:
        terminal: A dictionary with keys "X<layer>" and "Y<layer>", the input and target tensors of each sub-network.
        parameters: A dictionary with keys in the format "W<layer_id>_<order_id>" and "b<layer_id>_<order_id>".
        params: Network configuration of all layers.

    Returns:
        result_dict: A dictionary with keys "Z1", "Z2", ... holding the output of each sub-network,
            and "cost1", "cost2", ... holding the corresponding costs.
    """
    result_dict = {}
    for lay in range(len(params)):
        param = params[lay]
        n = len(param) - 1
        A = terminal["X" + str(lay + 1)]

        # Hidden layers: affine transform followed by ReLU.
        for i in range(1, n):
            W = parameters["W" + str(lay + 1) + '_' + str(i)]
            b = parameters["b" + str(lay + 1) + '_' + str(i)]
            Z = tf.matmul(W, A) + b
            A = relu(Z)

        # Output layer: affine transform only, then the cost against the target.
        W = parameters["W" + str(lay + 1) + '_' + str(n)]
        b = parameters["b" + str(lay + 1) + '_' + str(n)]
        Z = tf.matmul(W, A) + b
        cost = compute_cost(Z, terminal["Y" + str(lay + 1)])

        result_dict["Z" + str(lay + 1)] = Z
        result_dict["cost" + str(lay + 1)] = cost

    return result_dict
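For clarity, a minimal sketch of the dictionary layout the function above expects: one sub-network with layer sizes [4, 8, 2], with activations stored column-wise so that tf.matmul(W, A) applies. The shapes and values here are illustrative assumptions; relu and compute_cost are presumed to be defined at module level (e.g. tf.nn.relu and a cross-entropy helper).

params = [[4, 8, 2]]                      # one sub-network, so n = 2 (one hidden + one output layer)

terminal = {
    "X1": tf.ones([4, 5]),                # input:  4 features x 5 samples
    "Y1": tf.ones([2, 5]),                # target: 2 outputs  x 5 samples
}

parameters = {
    "W1_1": tf.Variable(tf.random.normal([8, 4])), "b1_1": tf.Variable(tf.zeros([8, 1])),
    "W1_2": tf.Variable(tf.random.normal([2, 8])), "b1_2": tf.Variable(tf.zeros([2, 1])),
}

result = forward_propogation_sub_network(terminal, parameters, params)
# result["Z1"] holds the 2 x 5 output of the sub-network and result["cost1"] its cost.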
Example #20
def resBlock(x, num):
    x = relu(x)
    conv1 = tcl.conv2d(
        x,
        256,
        3,
        1,
        activation_fn=tf.nn.relu,
        normalizer_fn=tcl.batch_norm,
        weights_initializer=tf.random_normal_initializer(stddev=0.02),
        scope='g_resconv1_' + str(num))
    print(conv1)
    conv2 = tcl.conv2d(
        conv1,
        256,
        3,
        1,
        activation_fn=tf.identity,
        normalizer_fn=tcl.batch_norm,
        weights_initializer=tf.random_normal_initializer(stddev=0.02),
        scope='g_resconv2_' + str(num))
    print(conv2)
    output = tf.add(x, conv2)
    print(output)
    return output
Example #21
def build_net(sess):
    in_len = 32
    in_dep = 1

    x_hold = tf.placeholder(tf.float32,shape=[None,in_dep*in_len*in_len])
    y_hold = tf.placeholder(tf.float32,shape=[None,2])
    keep_prob = tf.placeholder(tf.float32)

    xt = tf.reshape(x_hold,[-1,in_len,in_len,in_dep])

    #Layer 1 - 5x5 convolution
    w1 = tfac.weight([5,5,in_dep,4])
    b1 = tfac.bias([4])
    c1 = nn.relu(nn.conv2d(xt,w1,strides=[1,2,2,1],padding='VALID')+b1)
    o1 = c1

    #Layer 2 - 3x3 convolution
    w2 = tfac.weight([3,3,4,16])
    b2 = tfac.bias([16])
    c2 = nn.relu(nn.conv2d(o1,w2,strides=[1,2,2,1],padding='VALID')+b2)
    o2 = c2

    #Layer 3 - 3x3 convolution
    w3 = tfac.weight([3,3,16,32])
    b3 = tfac.bias([32])
    c3 = nn.relu(nn.conv2d(o2,w3,strides=[1,1,1,1],padding='VALID')+b3)
    o3 = c3

    dim = 32 * 4*4
        
    #Fully connected layer - 600 units
    of = tf.reshape(o3,[-1,dim])
    w4 = tfac.weight([dim,600])
    b4 = tfac.bias([600])
    o4 = nn.relu(tf.matmul(of,w4)+b4)

    o4 = nn.dropout(o4, keep_prob)

    #Output softmax layer - 2 units
    w5 = tfac.weight([600,2])
    b5 = tfac.bias([2])
    y = nn.softmax(tf.matmul(o4,w5)+b5)

    sess.run(tf.global_variables_initializer())

    return y,x_hold,y_hold,keep_prob
Example #22
    def setUp(self):
        super(ModelWrapperTest, self).setUp()

        graph = Graph()

        with graph.as_default():
            x = placeholder('float32', (None, 2))
            z1 = relu(x @ self.layer1_weights + self.internal_bias)
            z2 = relu(z1 @ self.layer2_weights + self.internal_bias)
            y = z2 @ self.layer3_weights + self.bias

        self.model = TensorflowModelWrapper(graph, x, y,
                                            dict(x=x, z1=z1, z2=z2, logits=y))

        self.layer0 = 'x'
        self.layer1 = 'z1'
        self.layer2 = z2.name
Example #23
def ACT(inputs, act_fn):
    if act_fn == 'relu':
        act = relu(inputs)
    elif act_fn == 'lrelu':
        act = leaky_relu(inputs)
    elif act_fn == 'sigmoid':
        act = sigmoid(inputs)
    else:
        raise ValueError('Unknown activation function: {}'.format(act_fn))
    return act
Example #24
def ConvReluMaxPool(X, weights, bias):
    conv = nn.conv2d(X, weights, strides=[1, 1, 1, 1], padding="VALID", data_format="NHWC")
    conv_bias = nn.bias_add(conv, bias, data_format="NHWC")

    relu = nn.relu(conv_bias)
    maxpool = nn.max_pool2d(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID", data_format="NHWC")
    
    return maxpool
Example #25
    def call(self, inputs, training=False):
        x = self.layer1(inputs)
        x = self.layer2(x, training=training)
        x = relu(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)

        return x
Example #26
def net(X, is_training=False):
    X = tf.reshape(X, shape=(-1, num_inputs))
    H1 = tf.nn.relu(tf.matmul(X, W1) + b1)
    if is_training:  # use dropout only while training the model
        H1 = dropout(H1, drop_prob1)  # add a dropout layer after the first fully connected layer
    H2 = nn.relu(tf.matmul(H1, W2) + b2)
    if is_training:
        H2 = dropout(H2, drop_prob2)  # add a dropout layer after the second fully connected layer
    return tf.math.softmax(tf.matmul(H2, W3) + b3)
Example #27
    def rnn(self):
        """RNN模型"""
        def lstm_cell():  # lstm核
            return BasicLSTMCell(self.config.hidden_size, state_is_tuple=True)

        def gru_cell():  # gru核
            return GRUCell(self.config.hidden_size)

        def dropout():  # 在每一个rnn核后面加一个dropout层
            if self.config.rnn == 'lstm':
                cell = lstm_cell()
            else:
                cell = gru_cell()
            return DropoutWrapper(
                cell=cell, output_keep_prob=self.config.dropout_keep_prob)

        # Word embedding lookup
        with tf.device('/gpu:0'):
            embedding = tf.get_variable(
                'embedding',
                [self.config.vocab_size, self.config.embedding_size])
            embedding_inputs = tf.nn.embedding_lookup(embedding, self.input_x)

        with tf.name_scope('rnn'):
            # Multi-layer RNN
            cells = [dropout() for _ in range(self.config.num_layers)]
            rnn_cell = MultiRNNCell(cells=cells, state_is_tuple=True)

            _outputs, _ = tf.nn.dynamic_rnn(cell=rnn_cell,
                                            inputs=embedding_inputs,
                                            dtype=tf.float32)
            last = _outputs[:, -1, :]  # take the last time step as the output

        with tf.name_scope('score'):
            # Fully connected layer, followed by dropout and ReLU activation
            fc = dense(last, self.config.hidden_size, name='fc1')
            fc = tf.contrib.layers.dropout(fc, self.config.dropout_keep_prob)
            fc = relu(fc)

            # Classifier
            self.logits = dense(fc, self.config.num_classes, name='fc2')
            self.y_predict_class = tf.argmax(tf.nn.softmax(self.logits), 1)

        with tf.name_scope('optimize'):
            # Loss: cross-entropy
            cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(
                logits=self.logits, labels=self.input_y)
            self.loss = tf.reduce_mean(cross_entropy)
            # Optimizer
            self.optimizer = tf.train.AdamOptimizer(
                learning_rate=self.config.learning_rate).minimize(self.loss)

        with tf.name_scope('accuracy'):
            # Accuracy
            correct_pred = tf.equal(tf.argmax(self.input_y, 1),
                                    self.y_predict_class)
            self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
Example #28
def relu(inputs):
    """
    Relu activation

    Parameters
    ----------
    inputs: Input tensor
    """
    return nn.relu(inputs)
Example #29
  def __init__(self, block, layers, modality='RGB',
      shortcut_type='B', num_classes=400,dropout=0.5,ST_struc=('A','B','C')):
    self.w_initer=tc.layers.xavier_initializer(tf.float32)
    self.data_format='NCHW'
    self.layer_data_format = 'channels_last' if self.data_format == 'NHWC' \
        else 'channels_first'
    self.is_training=True

    self.inplanes = 64

    self.input_channel = 3 if modality=='RGB' else 2  # 2 is for flow 
    self.ST_struc=ST_struc

    self.conv1_custom = nn.Conv3d(self.input_channel, 64, kernel_size=(1,7,7), stride=(1,2,2),
                                padding=(0,3,3), bias=False)
    self.conv1_custom = tl.Conv3D(filters=64, kernel_size=(1,7,7), strides=(1,2,2),
        padding='same', data_format=self.layer_data_format, use_bias=False, kernel_initializer=self.w_initer)
    
    self.depth_3d = sum(layers[:3])# C3D layers are only (res2,res3,res4),  res5 is C2D

    axis = 1 if self.data_format=="NCHW" else -1
    self.bn1 = tl.BatchNormalization(axis=axis, scale=False, fused=True)
    # out = self.bn(in, training=True) False for eval
    self.cnt = 0
    self.relu = lambda input : nn.relu(input)
    self.maxpool = tl.MaxPooling3D(pool_size=(2, 3, 3), strides=2, padding='valid',
        data_format=self.layer_data_format) # pooling layer for conv1.
    self.maxpool_2 = tl.MaxPooling3D(pool_size=(2, 1, 1), strides=(2, 1, 1), padding='valid',
        data_format=self.layer_data_format) # pooling layer for res2, 3, 4.
    
    self.layer1 = self._make_layer(block, 64, layers[0], shortcut_type)
    self.layer2 = self._make_layer(block, 128, layers[1], shortcut_type, stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], shortcut_type, stride=2)
    self.layer4 = self._make_layer(block, 512, layers[3], shortcut_type, stride=2)

    self.avgpool = tl.AveragePooling2D(pool_size=5, strides=1,
        data_format=self.layer_data_format)                              # pooling layer for res5.
    self.dropout=tl.Dropout(dropout)
    self.fc = tl.Dense(num_classes, use_bias=False, kernel_initializer=self.w_initer)
    # self.fc = nn.Linear(512 * block.expansion, num_classes)

    for m in self.modules():
        if isinstance(m, nn.Conv3d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
        elif isinstance(m, nn.BatchNorm3d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()

    # some private attribute
    self.input_size=(self.input_channel,16,160,160)       # input of the network
    self.input_mean = [0.485, 0.456, 0.406] if modality=='RGB' else [0.5]
    self.input_std = [0.229, 0.224, 0.225] if modality=='RGB' else [np.mean([0.229, 0.224, 0.225])]

    return
Example #30
    def call(self, inputs):
        outputs = []
        for i in range(self.num_outputs):
            # First input
            x = self.fuse_layers[i][0](inputs[0])
            for j in range(1, self.num_inputs):
                x = layers.add([x, self.fuse_layers[i][j](inputs[j])])
            out = nn.relu(x)
            outputs.append(out)

        return outputs