Code Example #1
def _setup_net(placeholder, layers, weights, mean_pixel):
    """
    Builds the CNN from the given pretrained weights and returns a dict of named
    layer outputs; the input is normalized by subtracting mean_pixel.
    """
    net = {}
    placeholder -= mean_pixel
    for i, name in enumerate(layers):
        kind = name[:4]
        with tf.variable_scope(name):
            if kind == 'conv':
                kernels, bias = weights[i][0][0][0][0]
                # matconvnet: [width, height, in_channels, out_channels]
                # tensorflow: [height, width, in_channels, out_channels]
                kernels = tf_utils.get_variable(
                    np.transpose(kernels, (1, 0, 2, 3)),
                    name=name + "_w")
                bias = tf_utils.get_variable(
                    bias.reshape(-1),
                    name=name + "_b")
                placeholder = tf_utils.conv2d(placeholder, kernels, bias)
            elif kind == 'relu':
                placeholder = tf.nn.relu(placeholder, name=name)
                tf_utils.add_activation_summary(placeholder, collections=['train'])
            elif kind == 'pool':
                placeholder = tf_utils.max_pool_2x2(placeholder)
            net[name] = placeholder

    return net
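
The snippet above leans on a small tf_utils module. Below is a minimal TF 1.x sketch of what those helpers typically look like; the names mirror the calls above, but the exact implementations in the original project may differ.

import tensorflow as tf

def get_variable(weights, name):
    # Wrap pretrained numpy weights as a TF variable under the current variable scope.
    init = tf.constant_initializer(weights, dtype=tf.float32)
    return tf.get_variable(name=name, initializer=init, shape=weights.shape)

def conv2d(x, W, b):
    # Stride-1 "SAME" convolution followed by a bias add.
    return tf.nn.bias_add(tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="SAME"), b)

def max_pool_2x2(x):
    # 2x2 max pooling with stride 2.
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")

def add_activation_summary(var, collections=None):
    # Histogram of the activation plus a zero-fraction ("sparsity") scalar for TensorBoard.
    tf.summary.histogram(var.op.name + "/activation", var, collections=collections)
    tf.summary.scalar(var.op.name + "/sparsity", tf.nn.zero_fraction(var),
                      collections=collections)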
Code Example #2
def inference(image, keep_prob):
    print("setting up vgg initialized conv layers ...")
    model_data = utils.get_model_data(FLAGS.model_path)

    mean = model_data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))

    weights = np.squeeze(model_data['layers'])

    processed_image = utils.process_image(image, mean_pixel)

    with tf.variable_scope("inference"):
        image_net = vgg_net(weights, processed_image)
        conv_final_layer = image_net["conv5_3"]

        pool5 = utils.max_pool_2x2(conv_final_layer, "pool5")

        W6 = utils.weight_variable([7, 7, 512, 4096], name="W6")
        b6 = utils.bias_variable([4096], name="b6")
        conv6 = utils.conv2d_basic(pool5, W6, b6, name="conv6")
        relu6 = tf.nn.relu(conv6, name="relu6")
        if FLAGS.debug:
            utils.add_activation_summary(relu6)
        relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)

        W7 = utils.weight_variable([1, 1, 4096, 4096], name="W7")
        b7 = utils.bias_variable([4096], name="b7")
        conv7 = utils.conv2d_basic(relu_dropout6, W7, b7, name="conv7")
        relu7 = tf.nn.relu(conv7, name="relu7")
        if FLAGS.debug:
            utils.add_activation_summary(relu7)
        relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)

        W8 = utils.weight_variable([1, 1, 4096, NUM_OF_CLASSES], name="W8")
        b8 = utils.bias_variable([NUM_OF_CLASSES], name="b8")
        conv8 = utils.conv2d_basic(relu_dropout7, W8, b8, name="conv8")

        # now to upscale to actual image size
        deconv_shape1 = image_net["pool4"].get_shape()
        W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, NUM_OF_CLASSES], name="W_t1")
        b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
        conv_t1 = utils.conv2d_transpose_strided(conv8, W_t1, b_t1, "conv_t1", output_shape=tf.shape(image_net["pool4"]))
        fuse_1 = tf.add(conv_t1, image_net["pool4"], name="fuse_1")

        deconv_shape2 = image_net["pool3"].get_shape()
        W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name="W_t2")
        b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
        conv_t2 = utils.conv2d_transpose_strided(fuse_1, W_t2, b_t2, "conv_t2", output_shape=tf.shape(image_net["pool3"]))
        fuse_2 = tf.add(conv_t2, image_net["pool3"], name="fuse_2")

        shape = tf.shape(image)
        deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], NUM_OF_CLASSES])
        W_t3 = utils.weight_variable([16, 16, NUM_OF_CLASSES, deconv_shape2[3].value], name="W_t3")
        b_t3 = utils.bias_variable([NUM_OF_CLASSES], name="b_t3")
        conv_t3 = utils.conv2d_transpose_strided(fuse_2, W_t3, b_t3, "conv_t3", output_shape=deconv_shape3, stride=8)

        annotation_pred = tf.argmax(conv_t3, axis=3, name="prediction")

    return tf.expand_dims(annotation_pred, axis=3), conv_t3
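
For reference, here is a minimal TF 1.x sketch of a conv2d_transpose_strided helper compatible with the calls above, assuming the fourth positional argument is an optional op name; the project's own utils may differ.

import tensorflow as tf

def conv2d_transpose_strided(x, W, b, name=None, output_shape=None, stride=2):
    # W is [height, width, out_channels, in_channels], as tf.nn.conv2d_transpose expects.
    with tf.name_scope(name, "conv2d_transpose_strided"):
        conv = tf.nn.conv2d_transpose(x, W, output_shape,
                                      strides=[1, stride, stride, 1], padding="SAME")
        return tf.nn.bias_add(conv, b)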
Code Example #3
def vgg_net(weights, image):
    layers = (
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',

        'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',

        'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
        'relu3_3', 'conv3_4', 'relu3_4', 'pool3',

        'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
        'relu4_3', 'conv4_4', 'relu4_4', 'pool4',

        'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
        'relu5_3', 'conv5_4', 'relu5_4'
    )

    net = {}
    current = image
    for i, name in enumerate(layers):
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, inchannel, outchannel]
            kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)), name=name + "_w")
            bias = utils.get_variable(bias.reshape(-1), name=name + "_b")
            current = utils.conv2d_basic(current, kernels, bias, name)
        elif kind == 'relu':
            current = tf.nn.relu(current, name=name)
            if FLAGS.debug:
                utils.add_activation_summary(current)
        elif kind == 'pool':
            current = utils.avg_pool2x2(current, name)
        net[name] = current

    return net
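
A hypothetical usage sketch follows, showing how vgg_net could be driven with MatConvNet VGG-19 weights loaded via scipy.io; the .mat filename and the placeholder shape are assumptions, not part of the project above.

import numpy as np
import scipy.io
import tensorflow as tf

# The standard MatConvNet VGG-19 release; the path is an assumption.
model_data = scipy.io.loadmat("imagenet-vgg-verydeep-19.mat")
mean = model_data['normalization'][0][0][0]
mean_pixel = np.mean(mean, axis=(0, 1))        # per-channel mean
weights = np.squeeze(model_data['layers'])     # one entry per VGG layer

image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name="input_image")
image_net = vgg_net(weights, image - mean_pixel)   # dict: layer name -> tensor
conv_final = image_net["conv5_3"]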
Code Example #4
File: inference.py  Project: Mooonside/SSD
        tf.summary.scalar('pos_loss', pos_loss)
        tf.summary.scalar('neg_loss', neg_loss)
        tf.summary.scalar('box_loss', box_loss)
        tf.summary.scalar('reg_loss', reg_loss)
        tf.summary.scalar('total_loss', total_loss)
        # tf.summary.scalar('learning_rate', decay_learning_rate)

    with tf.name_scope('summary_vars'):
        for weight in weight_vars:
            add_var_summary(weight)
        for bias in bias_vars:
            add_var_summary(bias)

    with tf.name_scope('summary_activations'):
        for activation in endpoints.values():
            add_activation_summary(activation)

merge_summary = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(FLAGS.summaries_dir, sess.graph)
saver = tf.train.Saver(max_to_keep=3)

sess.run(tf.global_variables_initializer())

# initialize
ckpt = None
if FLAGS.last_ckpt is not None:
    ckpt = tf.train.latest_checkpoint(FLAGS.last_ckpt)
    if ckpt is not None:
        # set up save configuration
        saver.restore(sess, ckpt)
        print('Recovering From {}'.format(ckpt))
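
A minimal sketch of the summary helpers this training snippet assumes (add_var_summary, add_activation_summary); the SSD project's actual helpers may record different statistics.

import tensorflow as tf

def add_var_summary(var):
    # Mean, stddev and a histogram for a weight/bias variable.
    mean = tf.reduce_mean(var)
    tf.summary.scalar(var.op.name + '/mean', mean)
    tf.summary.scalar(var.op.name + '/stddev',
                      tf.sqrt(tf.reduce_mean(tf.square(var - mean))))
    tf.summary.histogram(var.op.name + '/histogram', var)

def add_activation_summary(tensor):
    # Histogram plus zero-fraction ("sparsity") for a layer activation.
    tf.summary.histogram(tensor.op.name + '/activations', tensor)
    tf.summary.scalar(tensor.op.name + '/sparsity', tf.nn.zero_fraction(tensor))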
Code Example #5
File: FCN.py  Project: alexliyang/FCN_Tensorflow
def create_fcn(placeholder, keep_prob, classes):
    """
    Set up the main conv/deconv network.
    """
    with tf.variable_scope('inference'):
        vgg_net = create_vgg19(placeholder)
        conv_final = vgg_net['relu5_4']

        output = tf_utils.max_pool_2x2(conv_final)

        conv_shapes = [[7, 7, 512, 4096], [1, 1, 4096, 4096],
                       [1, 1, 4096, classes]]

        for i, conv_shape in enumerate(conv_shapes):
            name = 'conv%d' % (i + 6)
            with tf.variable_scope(name):
                W = tf_utils.weight_variable(conv_shape, name=name + '_w')
                b = tf_utils.bias_variable(conv_shape[-1:], name=name + '_b')
                output = tf_utils.conv2d(output, W, b)
            with tf.variable_scope('relu%d' % (i + 6)):
                if i < 2:
                    output = tf.nn.relu(output)
                    tf_utils.add_activation_summary(output,
                                                    collections=['train'])
                    output = tf.nn.dropout(output, keep_prob=keep_prob)

        pool4 = vgg_net['pool4']
        pool3 = vgg_net['pool3']

        deconv_shapes = [
            tf.shape(pool4),
            tf.shape(pool3),
            tf.stack([
                tf.shape(placeholder)[0],
                tf.shape(placeholder)[1],
                tf.shape(placeholder)[2], classes
            ])
        ]

        W_shapes = [[4, 4, pool4.get_shape()[3].value, classes],
                    [
                        4, 4,
                        pool3.get_shape()[3].value,
                        pool4.get_shape()[3].value
                    ], [16, 16, classes,
                        pool3.get_shape()[3].value]]

        strides = [2, 2, 8]

        for i in range(3):
            name = 'deconv%d' % (i + 1)
            with tf.variable_scope(name):
                W = tf_utils.weight_variable(W_shapes[i], name=name + '_w')
                output = tf_utils.conv2d_transpose(
                    output,
                    W,
                    None,
                    output_shape=deconv_shapes[i],
                    stride=strides[i])
            with tf.variable_scope('skip%d' % (i + 1)):
                if i < 2:
                    output = tf.add(output, vgg_net['pool%d' % (4 - i)])

        prediction = tf.argmax(output, axis=3, name='prediction')

    return tf.expand_dims(prediction, axis=3), output
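
A hypothetical usage sketch for create_fcn; the placeholder shapes and class count below are illustrative assumptions, not values from the project.

import tensorflow as tf

NUM_CLASSES = 151  # illustrative label count only
image = tf.placeholder(tf.float32, shape=[None, 224, 224, 3], name="input_image")
keep_prob = tf.placeholder(tf.float32, name="keep_prob")

prediction, logits = create_fcn(image, keep_prob, NUM_CLASSES)
# prediction: [batch, H, W, 1] argmax class indices; logits: [batch, H, W, NUM_CLASSES]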