def _squeezenet2d(flat_input, keep_prob, n_classes):
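     """SqueezeNet-style 2D classifier: 7x7 conv, fire modules with max pooling,
     then a 1x1 conv head reduced by global average pooling to logits."""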
     w_ini, b_ini, r_ini = initializers()
     x_multichannel = tf.reshape(
         flat_input, [-1, conf.img_dim[0], conf.img_dim[1], conf.num_ch])
     net = conv2d(
         x_multichannel,
         filters=96,
         kernel_size=7,  # NB: the Keras SqueezeNet uses a 3x3 kernel here
         name='conv1',
         kernel_initializer=w_ini,
         bias_initializer=b_ini,
         kernel_regularizer=r_ini)
     net = max_pooling2d(net, pool_size=3, strides=2, name='maxpool1')
     net = fire_module2d(net, 16, 64, name='fire2')
     net = fire_module2d(net, 16, 64, name='fire3')
     net = fire_module2d(net, 32, 128, name='fire4')
     net = max_pooling2d(net, pool_size=3, strides=2, name='maxpool4')
     net = fire_module2d(net, 32, 128, name='fire5')
     net = fire_module2d(net, 48, 192, name='fire6')
     net = fire_module2d(net, 48, 192, name='fire7')
     net = fire_module2d(net, 64, 256, name='fire8')
     net = max_pooling2d(net, pool_size=3, strides=2, name='maxpool8')
     net = fire_module2d(net, 64, 256, name='fire9')
     net = tf.nn.dropout(net, keep_prob=keep_prob, name='dropout9')
     net = conv2d(net,
                  n_classes,
                  1,
                  1,
                  name='conv10',
                  kernel_initializer=w_ini,
                  bias_initializer=b_ini,
                  kernel_regularizer=r_ini)
     logits = tf.reduce_mean(net, axis=[1, 2], name='global_avgpool10')
     return logits
Example #2
    def vgg_11(x, keep_proba, is_train, num_classes):
        # https://arxiv.org/abs/1409.1556 --> configuration A
        w_ini, b_ini, r_ini = initializers(conf.bias_init, conf.l2_str)
        x_multichannel = tf.reshape(
            x, [-1, conf.img_dim[0], conf.img_dim[1], conf.num_ch])
        net = _conv2d(x_multichannel, 64, 3, 'conv1')
        net = max_pooling2d(net, 2, 2, 'same', name='maxpool1')
        net = _conv2d(net, 128, 3, 'conv2')
        net = max_pooling2d(net, 2, 2, 'same', name='maxpool2')
        net = _conv2d(net, 256, 3, 'conv3_1')
        net = _conv2d(net, 256, 3, 'conv3_2')
        net = max_pooling2d(net, 2, 2, 'same', name='maxpool3')
        net = _conv2d(net, 512, 3, 'conv4_1')
        net = _conv2d(net, 512, 3, 'conv4_2')
        net = max_pooling2d(net, 2, 2, 'same', name='maxpool4')
        net = _conv2d(net, 512, 3, 'conv5_1')
        net = _conv2d(net, 512, 3, 'conv5_2')
        net = max_pooling2d(net, 2, 2, 'same', name='maxpool5')

        net = flatten(net, name='flat_layer')
        net = _dense(net, 4096, 'fc6')
        net = tf.nn.dropout(net, keep_proba, name='dropout6')
        net = _dense(net, 4096, 'fc7')
        net = tf.nn.dropout(net, keep_proba, name='dropout7')
        # # TODO consider replacing fc layers by conv layers
        # #  as in https://github.com/tensorflow/models/blob/master/research/slim/nets/vgg.py
        # #  for "if include_top" see https://github.com/keras-team/keras-applications/blob/master/keras_applications/vgg16.py
        # net = _conv2d(net, 4096, 7, 'fc6_replacement', padding='VALID')
        # net = tf.nn.dropout(net, keep_proba, name='dropout7')
        # net = _conv2d(net, 4096, 1, 'fc7_replacement')
        # net = tf.reduce_mean(net, axis=[1, 2], name='global_avg_pool8')
        logits = _dense(net, conf.num_cl, 'logits', activation=None)
        # softmax performed at loss function
        return logits
Example #3
 def add_inference(self, inputs, training, nclass):
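     # Simple two-conv CNN; the hard-coded 7*7*64 flatten assumes 28x28
     # inputs (e.g. MNIST) downsampled by the two 2x2/stride-2 pools.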
     df = 'channels_first' if self.data_format == 'NCHW' else 'channels_last'
     conv1 = layers.conv2d(inputs=inputs,
                           filters=32,
                           kernel_size=[5, 5],
                           padding="same",
                           data_format=df,
                           activation=tf.nn.relu)
     pool1 = layers.max_pooling2d(inputs=conv1,
                                  pool_size=[2, 2],
                                  strides=2,
                                  data_format=df)
     conv2 = layers.conv2d(inputs=pool1,
                           filters=64,
                           kernel_size=[3, 3],
                           padding="same",
                           data_format=df,
                           activation=tf.nn.relu)
     pool2 = layers.max_pooling2d(inputs=conv2,
                                  pool_size=[2, 2],
                                  strides=2,
                                  data_format=df)
     pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
     dense = layers.dense(inputs=pool2_flat,
                          units=1024,
                          activation=tf.nn.relu)
     dropout = layers.dropout(inputs=dense, rate=0.4, training=training)
     logits = layers.dense(inputs=dropout, units=nclass)
     return logits
Example #4
def vgg_backbone(inputs, **config):
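    """VGG-style convolutional backbone; three stride-2 pools yield a
    1/8-resolution feature map with 128 channels."""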
    params_conv = {
        'padding': 'SAME',
        'data_format': config['data_format'],
        'activation': tf.nn.relu,
        'batch_normalization': False,
        'training': config['training'],
        'kernel_reg': config.get('kernel_reg', 0.)
    }
    params_pool = {'padding': 'SAME', 'data_format': config['data_format']}

    with tf.variable_scope('vgg', reuse=tf.AUTO_REUSE):
        x = vgg_block(inputs, 64, 3, 'conv1_1', **params_conv)
        x = vgg_block(x, 64, 3, 'conv1_2', **params_conv)
        x = tfl.max_pooling2d(x, 2, 2, name='pool1', **params_pool)

        x = vgg_block(x, 64, 3, 'conv2_1', **params_conv)
        x = vgg_block(x, 64, 3, 'conv2_2', **params_conv)
        x = tfl.max_pooling2d(x, 2, 2, name='pool2', **params_pool)

        x = vgg_block(x, 128, 3, 'conv3_1', **params_conv)
        x = vgg_block(x, 128, 3, 'conv3_2', **params_conv)
        x = tfl.max_pooling2d(x, 2, 2, name='pool3', **params_pool)

        x = vgg_block(x, 128, 3, 'conv4_1', **params_conv)
        x = vgg_block(x, 128, 3, 'conv4_2', **params_conv)
    return x
Example #5
    def forward(self, x):
        with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
            # 2. Common Networks Layers
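            # Shared conv trunk with two 1x1 heads: conv6_1 is a sigmoid
            # confidence map, conv6_2 a 4-channel regression map.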
            self.conv1 = nn.conv2d(inputs=x,
                                   filters=32,
                                   kernel_size=[3, 3],
                                   activation=tf.nn.relu)
            self.conv1 = nn.max_pooling2d(self.conv1, pool_size=3, strides=2)
            self.conv2 = nn.conv2d(inputs=self.conv1,
                                   filters=64,
                                   kernel_size=[3, 3],
                                   activation=tf.nn.relu)
            self.conv2 = nn.max_pooling2d(self.conv2, pool_size=3, strides=2)
            self.conv3 = nn.conv2d(inputs=self.conv2,
                                   filters=64,
                                   kernel_size=[3, 3],
                                   activation=tf.nn.relu)
            self.conv3 = nn.max_pooling2d(self.conv3, pool_size=2, strides=2)
            self.conv4 = nn.conv2d(inputs=self.conv3,
                                   filters=128,
                                   kernel_size=[2, 2],
                                   activation=tf.nn.relu)
            self.conv5 = nn.conv2d(inputs=self.conv4,
                                   filters=64,
                                   kernel_size=[2, 2],
                                   activation=tf.nn.relu)
            self.conv6_1 = nn.conv2d(inputs=self.conv5,
                                     filters=1,
                                     kernel_size=[1, 1],
                                     activation=tf.nn.sigmoid)
            self.conv6_2 = nn.conv2d(inputs=self.conv5,
                                     filters=4,
                                     kernel_size=[1, 1])

        return self.conv6_1, self.conv6_2
Example #6
    def _model(self, inputs, mode, **config):
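        # Small CNN classifier over inputs['image']; returns logits, plus
        # softmax probabilities and argmax predictions outside TRAIN mode.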
        x = inputs['image']
        if config['data_format'] == 'channels_first':
            x = tf.transpose(x, [0, 3, 1, 2])

        params = {'padding': 'SAME', 'data_format': config['data_format']}

        x = tfl.conv2d(x, 32, 5, activation=tf.nn.relu, name='conv1', **params)
        x = tfl.max_pooling2d(x, 2, 2, name='pool1', **params)

        x = tfl.conv2d(x, 64, 5, activation=tf.nn.relu, name='conv2', **params)
        x = tfl.max_pooling2d(x, 2, 2, name='pool2', **params)

        x = tfl.flatten(x)
        x = tfl.dense(x, 1024, activation=tf.nn.relu, name='fc1')
        x = tfl.dense(x, 10, name='fc2')

        if mode == Mode.TRAIN:
            return {'logits': x}
        else:
            return {
                'logits': x,
                'prob': tf.nn.softmax(x),
                'pred': tf.argmax(x, axis=-1)
            }
Example #7
def f_net(inputs):
    inputs = inputs[0]
    inputs = inputs / 128 - 1.0
    # (640, 640, 3*n) -> ()
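    # Dueling-style head: separate advantage ('adv') and state-value ('v')
    # streams are summed into Q (without mean-centering the advantages).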
    with tf.device('/gpu:0'):
        conv1 = layers.conv2d(
            inputs=inputs, filters=16, kernel_size=(8, 8), strides=1,
            kernel_regularizer=l2_regularizer(scale=1e-2),
            activation=tf.nn.relu, name='conv1')
        print(conv1.shape)
        pool1 = layers.max_pooling2d(
            inputs=conv1, pool_size=3, strides=4, name='pool1')
        print(pool1.shape)
        conv2 = layers.conv2d(
            inputs=pool1, filters=16, kernel_size=(5, 5), strides=1,
            kernel_regularizer=l2_regularizer(scale=1e-2),
            activation=tf.nn.relu, name='conv2')
        print(conv2.shape)
        pool2 = layers.max_pooling2d(
            inputs=conv2, pool_size=3, strides=3, name='pool2')
        print(pool2.shape)
        conv3 = layers.conv2d(
            inputs=pool2, filters=64, kernel_size=(3, 3), strides=1,
            kernel_regularizer=l2_regularizer(scale=1e-2),
            activation=tf.nn.relu, name='conv3')
        print(conv3.shape)
        pool3 = layers.max_pooling2d(
            inputs=conv3, pool_size=3, strides=2, name='pool3', )
        print(pool3.shape)
        depth = pool3.get_shape()[1:].num_elements()
        inputs = tf.reshape(pool3, shape=[-1, depth])
        print(inputs.shape)
        hid1 = layers.dense(
            inputs=inputs, units=256, activation=tf.nn.relu,
            kernel_regularizer=l2_regularizer(scale=1e-2), name='hid1')
        print(hid1.shape)
        hid2 = layers.dense(
            inputs=hid1, units=256, activation=tf.nn.relu,
            kernel_regularizer=l2_regularizer(scale=1e-2), name='hid2_adv')
        print(hid2.shape)
        adv = layers.dense(
            inputs=hid2, units=len(AGENT_ACTIONS), activation=None,
            kernel_initializer=tf.random_uniform_initializer(-3e-3, 3e-3),
            kernel_regularizer=l2_regularizer(scale=1e-2), name='adv')
        print(adv.shape)
        hid2 = layers.dense(
            inputs=hid1, units=256, activation=tf.nn.relu,
            kernel_regularizer=l2_regularizer(scale=1e-2), name='hid2_v')
        print(hid2.shape)
        v = layers.dense(
            inputs=hid2, units=1, activation=None,
            kernel_initializer=tf.random_uniform_initializer(-3e-3, 3e-3),
            kernel_regularizer=l2_regularizer(scale=1e-2), name='v')
        print(v.shape)
        q = tf.add(adv, v, name='q')
        print(q.shape)

    return {"q": q}
Example #8
def f_net(inputs):
    """
    Policy/value network; action_num is fixed to 5. The flattened conv
    features and the 'pi' head are wrapped in tf.stop_gradient, so only
    the dense layers feeding 'q' receive gradients.
    :param inputs: list whose first element is the image batch
    :return: dict with policy 'pi' and action values 'q'
    """
    inputs = inputs[0]
    inputs = inputs/128 - 1.0
    action_num = 5
    l2 = 1e-4
    # (350, 350, 3*n) -> ()
    conv1 = layers.conv2d(
        inputs=inputs, filters=16, kernel_size=(8, 8), strides=1,
        kernel_regularizer=l2_regularizer(scale=l2),
        activation=tf.nn.relu, name='conv1')
    print(conv1.shape)
    pool1 = layers.max_pooling2d(
        inputs=conv1, pool_size=3, strides=4, name='pool1')
    print(pool1.shape)
    conv2 = layers.conv2d(
        inputs=pool1, filters=16, kernel_size=(5, 5), strides=1,
        kernel_regularizer=l2_regularizer(scale=l2),
        activation=tf.nn.relu, name='conv2')
    print(conv2.shape)
    pool2 = layers.max_pooling2d(
        inputs=conv2, pool_size=3, strides=3, name='pool2')
    print(pool2.shape)
    conv3 = layers.conv2d(
         inputs=pool2, filters=64, kernel_size=(3, 3), strides=1,
         kernel_regularizer=l2_regularizer(scale=l2),
         activation=tf.nn.relu, name='conv3')
    print(conv3.shape)
    pool3 = layers.max_pooling2d(
        inputs=conv3, pool_size=3, strides=2, name='pool3',)
    print(pool3.shape)
    depth = pool3.get_shape()[1:].num_elements()
    inputs = tf.reshape(pool3, shape=[-1, depth])
    inputs = tf.stop_gradient(inputs)
    print(inputs.shape)
    hid1 = layers.dense(
        inputs=inputs, units=256, activation=tf.nn.relu,
        kernel_regularizer=l2_regularizer(scale=l2), name='hid1')
    print(hid1.shape)
    hid2 = layers.dense(
        inputs=hid1, units=256, activation=tf.nn.relu,
        kernel_regularizer=l2_regularizer(scale=l2), name='hid2')
    print(hid2.shape)
    pi = layers.dense(
        inputs=hid2, units=action_num, activation=tf.nn.softmax,
        kernel_regularizer=l2_regularizer(scale=l2), name='pi')
    pi = tf.stop_gradient(pi)
    q = layers.dense(inputs=hid2, units=action_num, kernel_regularizer=l2_regularizer(scale=l2), name='q')
    return {"pi": pi, "q": q}
Example #9
def model(inputs, is_training, data_format, num_classes):
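    # SqueezeNet v1.0: strided 7x7 conv, fire modules with max pooling, a 1x1
    # conv classifier head, then 13x13 average pooling over the feature map.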
    net = conv2d(inputs=inputs,
                 filters=96,
                 kernel_size=[7, 7],
                 strides=2,
                 padding='valid',
                 data_format=data_format,
                 activation=tf.nn.relu,
                 use_bias=True,
                 kernel_initializer=tf.variance_scaling_initializer(),
                 bias_initializer=tf.zeros_initializer())
    net = max_pooling2d(inputs=net,
                        pool_size=[3, 3],
                        strides=2,
                        data_format=data_format)
    net = fire_module(net, 16, 64, data_format)
    net = fire_module(net, 16, 64, data_format)
    net = fire_module(net, 32, 128, data_format)
    net = max_pooling2d(inputs=net,
                        pool_size=[3, 3],
                        strides=2,
                        data_format=data_format)
    net = fire_module(net, 32, 128, data_format)
    net = fire_module(net, 48, 192, data_format)
    net = fire_module(net, 48, 192, data_format)
    net = fire_module(net, 64, 256, data_format)
    net = max_pooling2d(inputs=net,
                        pool_size=[3, 3],
                        strides=2,
                        data_format=data_format)
    net = fire_module(net, 64, 256, data_format)
    net = dropout(inputs=net, rate=0.5, training=is_training)
    net = conv2d(
        inputs=net,
        filters=num_classes,
        kernel_size=[1, 1],
        strides=1,
        padding='valid',  # no padding eqv. to pad=1 for 1x1 conv?
        data_format=data_format,
        activation=tf.nn.relu,
        use_bias=True,
        kernel_initializer=tf.initializers.random_normal(mean=0.0,
                                                         stddev=0.01),
        bias_initializer=tf.zeros_initializer())
    net = average_pooling2d(inputs=net,
                            pool_size=[13, 13],
                            strides=1,
                            data_format=data_format)

    # TODO fix for data_format later
    logits = tf.squeeze(net, [2, 3])

    return logits
Example #10
def generator_net(inputs, scope, reuse=None, rgb=False):
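	"""Two-branch generator: a residual color-reconstruction branch (br1) and
	an encoder-decoder feature-extraction branch (br2), concatenated and fused
	by a final conv into `output_channels` maps."""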
	
	output_channels = 3 if rgb else 1
	
	with tf.variable_scope(scope, reuse=reuse):
	
		# branch  1 ( color reconstruction)
		
		cv1   = conv2d(inputs, filters=16, kernel_size=3, strides=1, padding='same', activation=None, name='conv9_i')
		cv1_r = leaky_relu(cv1)
		
		res1_c = conv2d(cv1_r, filters=16, kernel_size=5, strides=1, padding='same', activation=None, name='conv3a_1')
		res1_b = batch_normalization(res1_c)
		res1_r = leaky_relu(res1_b)
		
		res1_d = conv2d(res1_r, filters=16, kernel_size=3, strides=1, padding='same', activation=None, name='conv3b_1')
		res1   = batch_normalization(res1_d)
		
		sum1  = cv1 + res1
		
		res2_c = conv2d(sum1, filters=16, kernel_size=3, strides=1, padding='same', activation=None, name='conv3a_2')
		res2_b = batch_normalization(res2_c)
		res2_r = leaky_relu(res2_b)
		
		res2_d = conv2d(res2_r, filters=16, kernel_size=3, strides=1, padding='same', activation=None, name='conv3b_2')
		res2   = batch_normalization(res2_d)
		
		br1 = sum1 + res2
		
		
		# branch 2 (features extraction)
		br2 = conv2d(inputs, filters=16, kernel_size=5, strides=1, padding='same', activation=tf.nn.leaky_relu, name='conv_bf1')
		br2 = max_pooling2d(br2, pool_size=2, strides=2, name='maxpool1')
		br2 = conv2d(br2, filters=16, kernel_size=3, strides=1, padding='same', activation=tf.nn.leaky_relu, name='conv_bf2')
		br2 = max_pooling2d(br2, pool_size=2, strides=2, name='maxpool2a')
		br2 = conv2d(br2, filters=16, kernel_size=3, strides=1, padding='same', activation=tf.nn.leaky_relu, name='conv_bf3')
		br2 = max_pooling2d(br2, pool_size=2, strides=2, name='maxpool2')
		
		print(br2.shape)
		br2 = conv2d_transpose(br2, filters=16, kernel_size=3, padding='same', strides=2, activation=tf.nn.leaky_relu, name="deconv_1")
		print(br2.shape)
		br2 = conv2d_transpose(br2, filters=16, kernel_size=3, padding='same', strides=2, activation=tf.nn.leaky_relu, name="deconv_2")
		print(br2.shape)
		br2 = conv2d_transpose(br2, filters=16, kernel_size=3, padding='same', strides=2, activation=tf.nn.leaky_relu, name="deconv_3")
		print(br2.shape)
		
		# concatenate branches and reconstruct image
		sum3 = tf.concat((br1, br2), axis=3)
		model = conv2d(sum3, filters=output_channels, kernel_size=3, strides=1, padding='same', activation=None, name='conv9_f')
		
		return model
Example #11
def create_resnet(x, num_blocks, num_outputs, training, activation=tf.nn.elu, output_activation=None, bottleneck=False, name=None, reuse=False):
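    """Builds a ResNet-style network: stem conv + max pool, repeated
    (bottleneck) blocks with downsampling, and a global-average-pooled
    1x1-conv output head."""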
    create_block = create_bottleneck_block if bottleneck else create_resnet_block
    unit = 256 if bottleneck else 64
    name = "ResNET" if name is None else name

    with tf.variable_scope(name, reuse=reuse):
        b = layers.conv2d(x, 64, kernel_size=7, strides=2, activation=activation, padding="SAME", kernel_initializer=xavier_initializer())
        b = layers.max_pooling2d(b, pool_size=3, strides=2, padding="SAME")

        for i, num_repeats in enumerate(num_blocks):
            b = create_block(b, unit*2**i, training, activation, skip_connection=True)

            for _ in range(num_repeats-1):
                b = create_block(b, unit*2**i, training, activation)

            b = downsample(b, unit*2**(i+1), training)

        # use global average pooling
        b = layers.conv2d(b, num_outputs, kernel_size=1, activation=None, padding="SAME")
        fts = tf.reduce_mean(b, [1, 2])

        if output_activation is not None:
            fts = output_activation(fts)

    return fts
Example #12
    def __call__(self):
        """ Builds the network. """
        x = conv2d(self.img_input, self.nb_filter, self.initial_kernel, kernel_initializer='he_normal', padding='same',
                   strides=self.initial_strides, use_bias=False, **self.conv_kwargs)

        if self.subsample_initial_block:
            x = batch_normalization(x, **self.bn_kwargs)
            x = tf.nn.relu(x)
            x = max_pooling2d(x, (3, 3), data_format=self.data_format, strides=(2, 2), padding='same')

        # Add dense blocks
        nb_filter = self.nb_filter
        for block_idx in range(self.nb_dense_block - 1):
            with tf.variable_scope('denseblock_{}'.format(block_idx)):
                x, nb_filter = self._dense_block(x, self.nb_layers[block_idx], nb_filter)
                # add transition_block
                x = self._transition_block(x, nb_filter)
                nb_filter = int(nb_filter * self.compression)

        # The last dense_block does not have a transition_block
        x, nb_filter = self._dense_block(x, self.final_nb_layer, nb_filter)  # use the running filter count, not the initial self.nb_filter

        x = batch_normalization(x, **self.bn_kwargs)
        x = tf.nn.relu(x)

        x = GlobalAveragePooling2D(data_format=self.data_format)(x)

        if self.include_top:
            x = dense(x, self.nb_classes)

        return x
Example #13
def inceptionBlock(X_in, c1, c3_r, c3, c5_r, c5, p3_r, ns=None):
    """ inception building block
    Args:
        X_in the input tensor
        c1...p3_r conv layer filter size of inception,
                    '_r' means reduction 1x1 conv layer
        ns name scope
    """
    global inceptionCount
    inceptionCount += 1
    if ns is None:
        ns = "inception{}".format(inceptionCount)

    with tf.name_scope(ns):
        conv1 = conv2d(X_in, c1, 1, padding='SAME')

        conv3 = conv2d(X_in, c3_r, 1, padding='SAME')
        conv3 = tf.nn.relu(conv3)
        conv3 = conv2d(conv3, c3, 3, padding='SAME')

        conv5 = conv2d(X_in, c5_r, 1, padding='SAME')
        conv5 = tf.nn.relu(conv5)
        conv5 = conv2d(conv5, c5, 5, padding='SAME')

        maxpool = max_pooling2d(X_in, 3, 1, padding='SAME')
        maxpool = conv2d(maxpool, p3_r, 1, padding='SAME')

        logits = tf.concat([conv1, conv3, conv5, maxpool], 3)
        logits = tf.nn.relu(logits)

    return logits
Example #14
    def __init__(self, state_size, action_size):
        self.states = tf.placeholder(tf.float32,
                                     shape=[None, *state_size],
                                     name="states")
        self.labels = tf.placeholder(
            tf.int32, shape=[None, 1],
            name="labels")  # size 1 because sparse loss is used.

        # conv1 = layers.conv2d(self.states, filters=16, kernel_size=(8, 8), strides=(4, 4), activation='relu',
        #                       name="conv1"),
        # conv2 = layers.conv2d(conv1, filters=32, kernel_size=(4, 4), strides=(2, 2), activation='relu', name="conv2"),
        # flatten = layers.flatten(conv2),
        # dense = layers.dense(flatten, 256, activation='relu', name="features"),
        #
        # self.logits = layers.dense(dense, action_size, name="logits")
        # self.value = layers.dense(dense, 1, name="values")

        # conv1 = conv2d(self.states, filters=32, kernel_size=(3, 3), name='conv1')

        with tf.variable_scope('layers'):
            conv1 = layers.conv2d(self.states,
                                  filters=32,
                                  kernel_size=(3, 3),
                                  activation=tf.nn.relu,
                                  name='conv1')
            conv2 = layers.conv2d(conv1,
                                  filters=64,
                                  kernel_size=(3, 3),
                                  activation=tf.nn.relu,
                                  name='conv2')
            max_pool = layers.max_pooling2d(conv2, 2, 1, name='max_pool')
            drop_1 = layers.dropout(max_pool, 0.25, name='drop1')
            flatten = layers.flatten(drop_1, name='flatten')
            dense = layers.dense(flatten, 128, activation=tf.nn.relu, name='dense')
            drop2 = layers.dropout(dense, 0.5, name='drop2')
            logits = layers.dense(drop2,
                                  action_size,
                                  activation=None,  # raw logits: softmax is applied below and inside the sparse loss
                                  name='logits')
            self.output = tf.nn.softmax(logits, name='output')
        # tf.one_hot(tf.arg_max(self.output, 1), depth=10)
        print(tf.argmax(self.output, 1))
        self.test = tf.one_hot(tf.argmax(self.output, 1), depth=10)
        # input()
        self.cost = tf.losses.sparse_softmax_cross_entropy(self.labels, logits)
        self.acc, self.acc_op = tf.metrics.accuracy(self.labels,
                                                    tf.argmax(self.output, 1))

        # self.grad = tf.gradients(self.cost, self.states, stop_gradients=[self.states])
        self.grad = tf.gradients(self.cost, tf.trainable_variables())

        self.optimizer = tf.train.AdamOptimizer()
        # print(tf.trainable_variables())
        # print(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='layers'))
        # print(self.grad)
        self.apply_grad = self.optimizer.apply_gradients(
            zip(
                self.grad,
                tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                  scope='layers')))
Example #15
 def mixed_7a(self, x):
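     # Inception-ResNet-style reduction block: three conv branches and a
     # stride-2 max-pool branch, concatenated along the channel axis.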
     with tf.variable_scope("mixed_7a"):
         with tf.variable_scope("branch0"):
             x0 = self.basic_conv2d(x,
                                    256,
                                    kernel_size=1,
                                    stride=1,
                                    padding="same",
                                    namescope="conv1",
                                    use_bias=False)
             x0 = self.basic_conv2d(x0,
                                    384,
                                    kernel_size=3,
                                    stride=2,
                                    padding="same",
                                    namescope="conv2",
                                    use_bias=False)
         with tf.variable_scope("branch1"):
             x1 = self.basic_conv2d(x,
                                    256,
                                    kernel_size=1,
                                    stride=1,
                                    padding="same",
                                    namescope="conv1",
                                    use_bias=False)
             x1 = self.basic_conv2d(x1,
                                    288,
                                    kernel_size=3,
                                    stride=2,
                                    padding="same",
                                    namescope="conv2",
                                    use_bias=False)
         with tf.variable_scope("branch2"):
             x2 = self.basic_conv2d(x,
                                    256,
                                    kernel_size=1,
                                    stride=1,
                                    padding="same",
                                    namescope="conv1",
                                    use_bias=False)
             x2 = self.basic_conv2d(x2,
                                    288,
                                    kernel_size=3,
                                    stride=1,
                                    padding="same",
                                    namescope="conv2",
                                    use_bias=False)
             x2 = self.basic_conv2d(x2,
                                    320,
                                    kernel_size=3,
                                    stride=2,
                                    padding="same",
                                    namescope="conv3",
                                    use_bias=False)
         with tf.variable_scope("branch3"):
             x3 = layers.max_pooling2d(x, 3, strides=2, padding="same")
         x = tf.concat([x0, x1, x2, x3], axis=-1)
         x = layers.dropout(x, noise_shape=[None, 1, 1, None])  # NB: no `training` flag, so this defaults to inference mode (a no-op)
     return x
Example #16
def f_net(inputs, num_outputs, is_training):
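    # Convolutional Q-network: inputs scaled to [-1, 1), four conv/pool stages
    # (note the convs have no activation argument, i.e. they are linear),
    # two dense layers, then one Q value per output.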
    inputs = inputs/128 - 1.0
    conv1 = layers.conv2d(
        inputs=inputs, filters=16, kernel_size=(8, 8), strides=1,
        kernel_regularizer=l2_regularizer(scale=1e-2), name='conv1'
    )
    pool1 = layers.max_pooling2d(
        inputs=conv1, pool_size=3, strides=4, name='pool1'
    )
    conv2 = layers.conv2d(
        inputs=pool1, filters=16, kernel_size=(5, 5), strides=1,
        kernel_regularizer=l2_regularizer(scale=1e-2), name='conv2'
    )
    pool2 = layers.max_pooling2d(
        inputs=conv2, pool_size=3, strides=3, name='pool2'
    )
    conv3 = layers.conv2d(
         inputs=pool2, filters=64, kernel_size=(3, 3), strides=1,
         kernel_regularizer=l2_regularizer(scale=1e-2), name='conv3'
    )
    pool3 = layers.max_pooling2d(
        inputs=conv3, pool_size=3, strides=8, name='pool3',
    )
    conv4 = layers.conv2d(
        inputs=pool3, filters=64, kernel_size=(3, 3), strides=1,
        kernel_regularizer=l2_regularizer(scale=1e-2), name='conv4'
    )
    pool4 = layers.max_pooling2d(
        inputs=conv4, pool_size=3, strides=8, name='pool4'
    )
    depth = pool4.get_shape()[1:].num_elements()
    inputs = tf.reshape(pool4, shape=[-1, depth])
    hid1 = layers.dense(
        inputs=inputs, units=256, activation=tf.nn.relu,
        kernel_regularizer=l2_regularizer(scale=1e-2), name='hid1'
    )
    hid2 = layers.dense(
        inputs=hid1, units=256, activation=tf.nn.relu,
        kernel_regularizer=l2_regularizer(scale=1e-2), name='hid2'
    )
    q = layers.dense(
        inputs=hid2, units=num_outputs, activation=None,
        kernel_regularizer=l2_regularizer(scale=1e-2), name='q'
    )
    q = tf.squeeze(q, name='out_sqz')
    return q
Example #17
def net(inputs, is_training):
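    # LeNet-style MNIST net with concrete dropout before each weight layer;
    # each concrete_dropout call returns the dropped-out tensor plus a
    # regularizer applied to the following layer's weights.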

    x = tf.reshape(inputs, [-1, 28, 28, 1])

    dropout_params = {
        'init_min': 0.1,
        'init_max': 0.1,
        'weight_regularizer': 1e-6,
        'dropout_regularizer': 1e-5,
        'training': is_training
    }
    x, reg = concrete_dropout(x, name='conv1_dropout', **dropout_params)
    x = tfl.conv2d(x,
                   32,
                   5,
                   activation=tf.nn.relu,
                   padding='SAME',
                   kernel_regularizer=reg,
                   bias_regularizer=reg,
                   name='conv1')
    x = tfl.max_pooling2d(x, 2, 2, padding='SAME', name='pool1')

    x, reg = concrete_dropout(x, name='conv2_dropout', **dropout_params)
    x = tfl.conv2d(x,
                   64,
                   5,
                   activation=tf.nn.relu,
                   padding='SAME',
                   kernel_regularizer=reg,
                   bias_regularizer=reg,
                   name='conv2')
    x = tfl.max_pooling2d(x, 2, 2, padding='SAME', name='pool2')

    x = tf.reshape(x, [-1, 7 * 7 * 64], name='flatten')
    x, reg = concrete_dropout(x, name='fc1_dropout', **dropout_params)
    x = tfl.dense(x,
                  1024,
                  activation=tf.nn.relu,
                  name='fc1',
                  kernel_regularizer=reg,
                  bias_regularizer=reg)

    outputs = tfl.dense(x, 10, name='fc2')
    return outputs
Example #18
    def model_old(self):  # does not use ROI pooling
        h_00 = tl.batch_normalization(tf.cast(self.m_image_data,
                                              tf.float32))  # 256*256

        h_01 = tl.conv2d(h_00, 8, 3)  # 256*256
        h_02 = tl.conv2d(h_01, 8, 3)  # 256*256
        h_03 = tl.max_pooling2d(h_02)  # 128*128

        h_04 = tl.conv2d(h_03, 16, 3)  # 128*128
        h_05 = tl.conv2d(h_04, 16, 3)  # 128*128
        h_06 = tl.max_pooling2d(h_05)  # 64*64

        h_07 = tl.conv2d(h_06, 32, 3)  # 64*64
        h_08 = tl.conv2d(h_07, 32, 3)  # 64*64
        h_09 = tl.max_pooling2d(h_08)  # 32*32

        h_10 = tl.conv2d(h_09, 32, 3)  # 32*32
        h_11 = tl.conv2d(h_10, 32, 3)  # 32*32
        h_12 = tl.max_pooling2d(h_11)  # 16*16

        conv_predict_label = tl.conv2d(h_12,
                                       len(self.config.anchors) * 2,
                                       1,
                                       activation_fn=None)
        conv_predict_proposal_regress = tl.conv2d(h_12,
                                                  len(self.config.anchors) * 4,
                                                  1,
                                                  activation_fn=None)

        predict_label_logits = tf.reshape(conv_predict_label,
                                          [-1, self.config.sample_size, 2])
        self.predict_label = tf.nn.softmax(predict_label_logits)
        self.predict_proposal_regress = tf.reshape(
            conv_predict_proposal_regress, [-1, self.config.sample_size, 4])

        self.loss_label = tf.losses.softmax_cross_entropy(
            self.m_label, predict_label_logits, weights=self.m_label_weight)
        self.loss_proposal_regress = tf.losses.mean_squared_error(
            self.m_proposal_regress,
            self.predict_proposal_regress,
            weights=self.m_proposal_regress_weight)

        self.loss = self.loss_label + self.loss_proposal_regress
        return self.predict_label, self.predict_proposal_regress, self.loss
Example #19
def DownSampler(input, num_output):
	'''
	Downsampling unit: a 3x3 same-padded convolution followed by 2x2 max pooling.
	num_output: number of output feature channels
	'''

	net = conv2d(input, num_output, kernel_size=(3, 3), strides=(1, 1), padding="SAME")
	net = max_pooling2d(net, pool_size=(2, 2), strides=2)

	return net
Example #20
        def vgg(inputs, ys, knn_inputs, knn_ys):
            # block 1
            x = conv2d(inputs, filters=64, kernel_size=(3, 3), padding='same', activation=ReLU)
            x = conv2d(x, filters=64, kernel_size=(3, 3), padding='same', activation=ReLU)
            x = max_pooling2d(x, pool_size=(2, 2), strides=(2, 2))

            # block 2
            x = conv2d(x, filters=128, kernel_size=(3, 3), padding='same', activation=ReLU)
            x = conv2d(x, filters=128, kernel_size=(3, 3), padding='same', activation=ReLU)
            x = max_pooling2d(x, pool_size=(2, 2), strides=(2, 2))

            # final
            x = flatten(x)
            x = dense(x, units=1024, activation=ReLU)
            x = dropout(x, rate=0.5, training=is_training)  # assumption: an is_training flag from the enclosing scope; the original argument was left blank
            x = dense(x, units=1024, activation=ReLU)
            x = dense(x, units=self.data_loader.num_outputs)

            return x
Example #21
def lenet5_2d(x, keep_prob, is_train, conf):
    """modified LeNet5 (ReLU instead sigmoid, dropout after FC layers)"""
    def _dense(inputs, neurons, name, activation=tf.nn.relu):
        """wrapper function for dense layer"""
        w_ini, b_ini, r_ini = initializers(conf.bias_init, conf.l2_str)
        return dense(inputs,
                     neurons,
                     activation=activation,
                     bias_initializer=b_ini,
                     kernel_initializer=w_ini,
                     kernel_regularizer=r_ini,
                     name=name)

    def _conv2d(inputs, n_filters, size, name, padding='VALID'):
        """wrapper function for 2D convolutional layer"""
        w_ini, b_ini, r_ini = initializers(conf.bias_init, conf.l2_str)
        return conv2d(inputs,
                      n_filters,
                      size,
                      padding=padding,
                      activation=tf.nn.relu,
                      bias_initializer=b_ini,
                      kernel_initializer=w_ini,
                      kernel_regularizer=r_ini,
                      name=name)

    # split signals according to length and channels
    x_multichannel = tf.reshape(
        x, [-1, conf.img_dim[0], conf.img_dim[1], conf.num_ch])
    # inference
    net = _conv2d(x_multichannel, 6, 5, 'conv1', padding='SAME')
    net = max_pooling2d(net, [2, 2], 2, name='pool1')
    net = _conv2d(net, 16, 5, 'conv2')
    net = max_pooling2d(net, [2, 2], 2, name='pool2')
    net = flatten(net, 'flat_layer')
    net = _dense(net, 120, 'fc1')
    net = tf.nn.dropout(net, keep_prob, name='dropout1')
    net = _dense(net, 84, 'fc2')
    net = tf.nn.dropout(net, keep_prob, name='dropout2')
    # output layer (softmax applied at loss function)
    logits = _dense(net, conf.num_cl, activation=None, name='logits')
    return logits
Example #22
 def __init__(self, images, params, reuse=False):
     with tf.variable_scope("RGBNetwork", reuse=reuse) as scope:
         x = tf.reshape(images, [-1, 200, 200, 3])
         x = conv2d(x, 64, (10, 10), strides=(4, 4), padding='same')
         x = leaky_relu(x)
         x = max_pooling2d(x, (5, 5), strides=(2, 2), padding='same')
         x = conv2d(x, 128, (2, 2), strides=(2, 2), padding='same')
         x = leaky_relu(x)
         x = max_pooling2d(x, (2, 2), strides=(2, 2), padding='same')
         x = conv2d(x, 256, (3, 3), strides=(1, 1), padding='same')
         x = leaky_relu(x)
         x = conv2d(x, 512, (3, 3), strides=(1, 1), padding='same')
         x = leaky_relu(x)
         x = flatten(x)
         x = dense(x, 256, activation=leaky_relu)
         x = dense(x, 200 * 200, activation=None)
         x = tf.reshape(x, [-1, 200, 200])
         self.output = tf.cast(x, tf.float32)
     self.parameters = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                         scope="RGBNetwork")  # must match the variable_scope above ("SimpleNetwork" would collect nothing)
Example #23
    def forward(self, x):
        with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
            # 2. Common Networks Layers
            self.conv1 = nn.conv2d(inputs=x,
                                   filters=28,
                                   kernel_size=[3, 3],
                                   activation=tf.nn.relu)
            self.conv1 = nn.max_pooling2d(self.conv1, pool_size=3, strides=2)
            self.conv2 = nn.conv2d(inputs=self.conv1,
                                   filters=48,
                                   kernel_size=[3, 3],
                                   activation=tf.nn.relu)
            self.conv2 = nn.max_pooling2d(self.conv2, pool_size=3, strides=2)
            self.conv3 = nn.conv2d(inputs=self.conv2,
                                   filters=64,
                                   kernel_size=[2, 2],
                                   activation=tf.nn.relu)
            # self.conv4 = nn.conv2d(inputs=self.conv3, filters=64, kernel_size=[2, 2], activation=tf.nn.relu)
            # self.conv5_1 = nn.conv2d(inputs=self.conv4, filters=1, kernel_size=[1, 1], activation=tf.nn.sigmoid)
            # self.conv5_2 = nn.conv2d(inputs=self.conv4, filters=4, kernel_size=[1, 1])
            #
            # return self.conv5_1, self.conv5_2

            # The approach below is wrong because the batch size is unknown
            # at graph-construction time:
            # self.conv_flat = tf.reshape(self.conv3, [self.conv3.get_shape()[0], -1])
            self.conv_flat = tf.reshape(self.conv3, (-1, 2 * 2 * 64))
            self.fc1 = tf.layers.dense(inputs=self.conv_flat,
                                       units=128,
                                       activation=tf.nn.relu)
            self.fc1 = tf.layers.dense(inputs=self.fc1,
                                       units=64,
                                       activation=tf.nn.relu)
            self.fc1 = tf.layers.dense(inputs=self.fc1,
                                       units=32,
                                       activation=tf.nn.relu)
            self.fc2_1 = tf.layers.dense(inputs=self.fc1,
                                         units=1,
                                         activation=tf.nn.sigmoid)
            self.fc2_2 = tf.layers.dense(inputs=self.fc1, units=4)

        return self.fc2_1, self.fc2_2
Example #24
    def inference(self, input):
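        # ENet/ERFNet-style downsampler: a stride-2 conv and a max-pool of the
        # input are concatenated, then batch-normalized and ReLU-activated.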

        conv = conv2d(input,
                      self.noutput,
                      kernel_size=(3, 3),
                      strides=(2, 2),
                      padding="SAME")
        maxpool = max_pooling2d(input, pool_size=(2, 2), strides=2)
        concat = tf.concat([conv, maxpool], -1)
        bn = batch_normalization(concat, training=self.training)  # pass training as a keyword (the 2nd positional arg is `axis`)

        return tf.nn.relu(bn)
Example #25
def classifier_net(inputs, scope, reuse=None):
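	"""CNN classifier/discriminator: seven conv layers with max pooling,
	flatten, three dense layers, and a single sigmoid output unit."""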
	
	with tf.variable_scope(scope, reuse=reuse):
		
		net = conv2d(inputs, filters=32, kernel_size=5, strides=1, activation=tf.nn.leaky_relu, name='conv1')
		net = conv2d(net, filters=64, kernel_size=3, strides=1, activation=tf.nn.leaky_relu, name='conv2')
		net = max_pooling2d(net, pool_size=2, strides=2, padding='same', name='maxpool1')
		net = conv2d(net, filters=64, kernel_size=3, strides=1, activation=tf.nn.leaky_relu, name='conv3')
		net = max_pooling2d(net, pool_size=2, strides=2, padding='same', name='maxpool2a')
		net = conv2d(net, filters=64, kernel_size=3, strides=1, activation=tf.nn.leaky_relu, name='conv4')
		net = max_pooling2d(net, pool_size=2, strides=2, padding='same', name='maxpool2')
		net = conv2d(net, filters=64, kernel_size=3, strides=1, activation=tf.nn.leaky_relu, name='conv5')
		net = max_pooling2d(net, pool_size=2, strides=2, padding='same', name='maxpool3a')
		net = conv2d(net, filters=64, kernel_size=3, strides=1, activation=tf.nn.leaky_relu, name='conv6')
		net = max_pooling2d(net, pool_size=2, strides=2, padding='same', name='maxpool3')
		net = conv2d(net, filters=32, kernel_size=3, strides=1, activation=tf.nn.leaky_relu, name='conv7')
		net = max_pooling2d(net, pool_size=2, strides=2, padding='same', name='maxpool4')
		net = flatten(net, name='flatten')
		# net = dense(net, 1024, activation=tf.nn.leaky_relu, name='dense1')
		# net = batch_normalization(net)
		net = dense(net, 512, activation=tf.nn.leaky_relu, name='dense2')
		# net = batch_normalization(net)
		net = dense(net, 256, activation=tf.nn.leaky_relu, name='dense3')
		# net = batch_normalization(net)
		net = dense(net, 128, activation=tf.nn.leaky_relu, name='dense4')
		net = dense(net, 1, activation=tf.nn.sigmoid, name='out')

		return net
Example #26
def cnn_model(features, mode):
    """
    cnn model structure
    :param features: images
    :return: predicts
    """
    input_layer = tf.reshape(features, shape=[-1, 28, 28, 1], name='input')
    # conv1
    # trainable can change in middle of training
    conv1 = layers.conv2d(inputs=input_layer, filters=32, kernel_size=[3, 3], padding="same", activation=tf.nn.relu,
                          name='conv1')
    pool1 = layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2, name='pool1')
    # conv2
    conv2 = layers.conv2d(inputs=pool1, filters=64, kernel_size=[5, 5], padding="same", activation=tf.nn.relu,
                          name='conv2')
    pool2 = layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2, name='pool2')
    # fully connected layer
    pool2_flat = layers.flatten(pool2, name='flatten')
    dense = layers.dense(inputs=pool2_flat, units=512, activation=tf.nn.relu, name='dense_layer')
    dropout = layers.dropout(inputs=dense, rate=0.4, training=(mode == tf.estimator.ModeKeys.TRAIN), name='dropout')
    # output layer
    logits = tf.layers.dense(inputs=dropout, units=10, name='logits')
    return logits
Example #27
 def _squeezenet(images, num_classes=1000):
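     # SqueezeNet v1.0 variant: fire modules, a 1x1 conv producing per-class
     # maps, 13x13 max pooling, then squeeze to [batch, num_classes] logits.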
     #print("image",images)
     net = conv2d(images, 96, [7, 7], strides=2, padding='SAME', kernel_regularizer=regularizer, activation=tf.nn.relu)
     net = max_pooling2d(net, [3, 3], strides=2)
     #print("@here ",net)
     net = fire_module(net, 16, 64, scope='fire2')
     net = fire_module(net, 16, 64, scope='fire3')
     net = fire_module(net, 32, 128, scope='fire4')
     net = max_pooling2d(net, [3, 3], strides=2)
     net = fire_module(net, 32, 128, scope='fire5')
     net = fire_module(net, 48, 192, scope='fire6')
     net = fire_module(net, 48, 192, scope='fire7')
     net = fire_module(net, 64, 256, scope='fire8')
     net = max_pooling2d(net, [3, 3], strides=2)
     net = fire_module(net, 64, 256, scope='fire9')
     #print("@here fire_module ",net)
     net = conv2d(net, num_classes, [1, 1], strides=1, padding='SAME')
     #print("@here conv2d ",net)
     net = max_pooling2d(net, [13, 13], strides=1)
     #print("@here pooling ",net)
     logits = tf.squeeze(net, [1,2], name='logits')
     #print("@here logits ",logits)
     return logits
Example #28
    def build(self, input):
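        # U-Net-style encoder/decoder: four conv blocks with pooling and
        # dropout, a 1024-filter bottleneck, then deconv blocks with skip
        # connections back to conv1-conv4; outputs a 1-channel logit map.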
        self.conv1 = self.conv_block(input, 64, "conv1")
        pool1 = max_pooling2d(self.conv1, pool_size=(2, 2), strides=2)
        drop1 = dropout(pool1, .25)
        self.conv2 = self.conv_block(drop1, 128, 'conv2')
        pool2 = max_pooling2d(self.conv2, pool_size=(2, 2), strides=2)
        drop2 = dropout(pool2, .25)
        self.conv3 = self.conv_block(drop2, 256, 'conv3')
        pool3 = max_pooling2d(self.conv3, pool_size=(2, 2), strides=2)
        drop3 = dropout(pool3, .25)
        self.conv4 = self.conv_block(drop3, 512, 'conv4')
        pool4 = max_pooling2d(self.conv4, pool_size=(2, 2), strides=2)
        drop4 = dropout(pool4, .25)

        self.conv5 = conv2d(drop4,
                            1024,
                            kernel_size=(3, 3),
                            activation=tf.nn.relu,
                            padding='SAME')
        self.conv5_2 = conv2d(self.conv5,
                              1024,
                              kernel_size=(3, 3),
                              activation=tf.nn.relu,
                              padding='SAME')

        self.deconv4 = self.deconv_block(self.conv5_2, 512, self.conv4, "SAME",
                                         'deconv4')
        self.deconv3 = self.deconv_block(self.deconv4, 256, self.conv3,
                                         "VALID", 'deconv3')
        self.deconv2 = self.deconv_block(self.deconv3, 128, self.conv2, "SAME",
                                         'deconv2')
        self.deconv1 = self.deconv_block(self.deconv2, 64, self.conv1, "VALID",
                                         'deconv1')
        self.output = conv2d(self.deconv1,
                             filters=1,
                             kernel_size=1,
                             name='logits')
Example #29
 def forward(self, x, is_training):
     # Assumes [Batch, Time, Freq, Chan]
     self.inputs = layers.conv2d(
         x, filters=128, kernel_size=[3, 3], strides=1,
         activation=tf.nn.relu
     )
     print(x.shape)
     x = layers.conv2d(
         self.inputs,  # feed the first conv's output onward (the original re-used the raw input x here)
         filters=128, kernel_size=[3, 3], strides=1,
         activation=tf.nn.relu
     )
     print(x.shape)
     x = layers.max_pooling2d(
         x, pool_size=[3, 3], strides=[3, 3]
     )
     print(x.shape)
     x = layers.conv2d(
         x, filters=256, kernel_size=[3, 3], strides=1,
         activation=tf.nn.relu
     )
     print(x.shape)
     x = layers.conv2d(
         x, filters=256, kernel_size=[3, 3], strides=1,
         activation=tf.nn.relu
     )
     print(x.shape)
     x = layers.max_pooling2d(
         x, pool_size=[3, 3], strides=[3, 3]
     )
     print(x.shape)
     x = tf.contrib.layers.flatten(x)
     print(x.shape)
     self.raw_scores = layers.dense(  # Linear map to output
         inputs=x, units=self.FLAGS.num_classes, activation=None
     )
     return self.raw_scores
Example #30
    def _encode_brick(incoming,
                      nb_filters,
                      is_training,
                      scope,
                      use_bn=True,
                      trainable=True):
        """ Encoding brick: conv --> conv --> max pool.
        """
        with tf.variable_scope(scope):
            conv1 = layers.conv2d(incoming,
                                  filters=nb_filters,
                                  kernel_size=3,
                                  strides=1,
                                  padding='same',
                                  kernel_initializer=he_init,
                                  bias_initializer=b_init,
                                  trainable=trainable)

            if use_bn:
                conv1 = layers.batch_normalization(conv1,
                                                   training=is_training,
                                                   trainable=trainable)
            conv1_act = tf.nn.relu(conv1)

            conv2 = layers.conv2d(conv1_act,
                                  filters=nb_filters,
                                  kernel_size=3,
                                  strides=1,
                                  padding='same',
                                  kernel_initializer=he_init,
                                  bias_initializer=b_init,
                                  trainable=trainable)

            if use_bn:
                conv2 = layers.batch_normalization(conv2,
                                                   training=is_training,
                                                   trainable=trainable)
            conv2_act = tf.nn.relu(conv2)

            pool = layers.max_pooling2d(conv2_act,
                                        pool_size=2,
                                        strides=2,
                                        padding='same')

            with tf.variable_scope('concat_layer_out'):
                concat_layer_out = conv2_act
        return pool, concat_layer_out