Example 1
def inference_resnet(dataset):
    dataset_reshaped = tf.reshape(dataset, [-1, 28, 28, 1])
    with tf.name_scope("conv1") as scope:
        W_conv1 = utils.weight_variable([5, 5, 1, 32], name="W_conv1")
        bias1 = utils.bias_variable([32], name="bias1")
        tf.histogram_summary("W_conv1", W_conv1)
        tf.histogram_summary("bias1", bias1)
        h_conv1 = tf.nn.relu(
            utils.conv2d_basic(dataset_reshaped, W_conv1, bias1))
        h_norm1 = utils.local_response_norm(h_conv1)

    # Two strided bottleneck residual units: 28x28 -> 14x14 -> 7x7 feature maps.
    bottleneck_1 = utils.bottleneck_unit(h_norm1,
                                         32,
                                         32,
                                         down_stride=True,
                                         name="res1")
    bottleneck_2 = utils.bottleneck_unit(bottleneck_1,
                                         64,
                                         64,
                                         down_stride=True,
                                         name="res2")

    with tf.name_scope("fc1") as scope:
        h_flat = tf.reshape(bottleneck_2, [-1, 7 * 7 * 64])
        W_fc1 = utils.weight_variable([7 * 7 * 64, 10], name="W_fc1")
        bias_fc1 = utils.bias_variable([10], name="bias_fc1")
        tf.histogram_summary("W_fc1", W_fc1)
        tf.histogram_summary("bias_fc1", bias_fc1)
        logits = tf.matmul(h_flat, W_fc1) + bias_fc1

    return logits
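
A minimal call sketch for Example 1 (not from the source): it assumes a graph-mode TensorFlow 1.x (or earlier) setup with tensorflow imported as tf, and flattened 28x28 grayscale inputs, which the reshape to [-1, 28, 28, 1] suggests. The placeholder name is hypothetical.

import tensorflow as tf

# Sketch only: input shape and names are assumptions.
images = tf.placeholder(tf.float32, [None, 784], name="images")
logits = inference_resnet(images)   # conv1 -> res1 -> res2 -> fc1, 10-way logits
probs = tf.nn.softmax(logits)       # class probabilities
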
Example 2
def inference_res(input_image):
    # Initial 3x3 convolution: 3 input channels (RGB) -> 32 feature maps.
    W1 = utils.weight_variable([3, 3, 3, 32])
    b1 = utils.bias_variable([32])
    hconv_1 = tf.nn.relu(utils.conv2d_basic(input_image, W1, b1))
    h_norm = utils.local_response_norm(hconv_1)
    # Encoder: two strided bottleneck units that downsample the feature map.
    bottleneck_1 = utils.bottleneck_unit(h_norm, 16, 16, down_stride=True, name="res_1")
    bottleneck_2 = utils.bottleneck_unit(bottleneck_1, 8, 8, down_stride=True, name="res_2")
    # Decoder: two up-strided bottleneck units that restore the resolution.
    bottleneck_3 = utils.bottleneck_unit(bottleneck_2, 16, 16, up_stride=True, name="res_3")
    bottleneck_4 = utils.bottleneck_unit(bottleneck_3, 32, 32, up_stride=True, name="res_4")
    # Final 3x3 convolution back to 3 channels with tanh output in [-1, 1].
    W5 = utils.weight_variable([3, 3, 32, 3])
    b5 = utils.bias_variable([3])
    out = tf.nn.tanh(utils.conv2d_basic(bottleneck_4, W5, b5))
    return out
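
Example 2 maps an RGB image to a 3-channel tanh output through a downsample/upsample pair of bottleneck stages, so a plausible use is image-to-image training. The sketch below only wires up the graph; the placeholder shape and the L2 reconstruction loss are assumptions, not taken from the source.

import tensorflow as tf

# Sketch only: shapes and the loss choice are assumptions.
input_image = tf.placeholder(tf.float32, [None, 128, 128, 3], name="input_image")
target = tf.placeholder(tf.float32, [None, 128, 128, 3], name="target")
prediction = inference_res(input_image)                  # 3-channel output in [-1, 1]
loss = tf.reduce_mean(tf.square(prediction - target))    # L2 reconstruction loss
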
Example 3
def res_net(weights, image):
    layers = ('conv1', 'res2a', 'res2b', 'res2c', 'res3a', 'res3b1', 'res3b2',
              'res3b3', 'res4a', 'res4b1', 'res4b2', 'res4b3', 'res4b4',
              'res4b5', 'res4b6', 'res4b7', 'res4b8', 'res4b9', 'res4b10',
              'res4b11', 'res4b12', 'res4b13', 'res4b14', 'res4b15', 'res4b16',
              'res4b17', 'res4b18', 'res4b19', 'res4b20', 'res4b21', 'res4b22',
              'res5a', 'res5b', 'res5c')
    res_branch1_parm = {
        # branch1 block parameters: padding (if 0 then 'VALID' else 'SAME') and stride
        'res2a': [0, 1, 0, 1, 1, 1, 0, 1],
        'res3a': [0, 2, 0, 2, 1, 1, 0, 1],
        'res4a': [0, 2, 0, 2, 1, 1, 0, 1],
        'res5a': [0, 2, 0, 2, 1, 1, 0, 1]
    }

    net = {}
    current = image
    n = 0
    for i, name in enumerate(layers):
        kind = name[:3]
        print('-----------------layer: %s-----------------------' % name)
        print('Input: ', current.shape)
        #convolutional
        if kind == 'con':
            kernels, bias, n = get_kernel_bias(n, weights, name)
            print('kernel size: ', kernels.shape, 'bias size', bias.shape)
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            current = utils.conv2d_strided(current, kernels, bias)
            current = utils.max_pool_2x2(current, 3)
        #resnet
        elif kind == 'res':
            sub_kind = name[4]
            # the 'a' unit of each stage carries a projection shortcut (branch1)
            if sub_kind == 'a':
                res_param = res_branch1_parm[name]
                branch1_name = '%s_%s' % (name, 'branch1')
                branch1_w, branch1_b, out_chan_t, n = get_kernel_bias_res(
                    n, weights, branch1_name, 2)
                print('branch1:kernel size: ', branch1_w.shape, 'bias size',
                      branch1_b.shape)
            else:
                res_param = None
                branch1_w = None
            branch2a_name = '%s_%s' % (name, 'branch2a')
            branch2a_w, branch2a_b, out_chan_t2, n = get_kernel_bias_res(
                n, weights, branch2a_name, 0)
            print('branch2a:kernel size: ', branch2a_w.shape, 'bias size',
                  branch2a_b.shape)
            branch2b_name = '%s_%s' % (name, 'branch2b')
            branch2b_w, branch2b_b, _, n = get_kernel_bias_res(
                n, weights, branch2b_name, 0)
            print('branch2b:kernel size: ', branch2b_w.shape, 'bias size',
                  branch2b_b.shape)
            branch2c_name = '%s_%s' % (name, 'branch2c')
            branch2c_w, branch2c_b, out_chan2, n = get_kernel_bias_res(
                n, weights, branch2c_name, 3)
            print('branch2c:kernel size: ', branch2c_w.shape, 'bias size',
                  branch2c_b.shape)
            if sub_kind == 'a':
                out_chan1 = out_chan_t
            else:
                out_chan1 = out_chan_t2
            current = utils.bottleneck_unit(current, res_param, branch1_w,
                                            branch1_b, branch2a_w, branch2a_b,
                                            branch2b_w, branch2b_b, branch2c_w,
                                            branch2c_b, out_chan1, out_chan2,
                                            False, False, name)
        print('layer output ', current.shape)
        net[name] = current
    current = utils.avg_pool(current, 7, 1)
    print('resnet final sz ', current.shape)
    #return net
    return current
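
A rough call sketch for Example 3: the layer list (res2a through res5c, with 22 res4b units) matches a ResNet-101-style model, and the final 7x7 average pool suggests a 224x224 input. The .mat file name, the "params" key, and the input size are hypothetical, and the loaded weight layout has to match whatever get_kernel_bias and get_kernel_bias_res expect.

import numpy as np
import scipy.io
import tensorflow as tf

# Sketch only: file name, key, and input size are assumptions.
model = scipy.io.loadmat("imagenet-resnet-101-dag.mat")  # hypothetical model file
weights = np.squeeze(model["params"])                    # hypothetical key/layout
image = tf.placeholder(tf.float32, [None, 224, 224, 3], name="image")
features = res_net(weights, image)   # globally pooled res5c features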