Example #1
def mobile_block(x, f1, f2, p, name):
    filters1 = tf.Variable(init_filters(size=[3,3,f1,1], init='alexnet'), dtype=tf.float32, name=name+'_conv_dw')
    filters2 = tf.Variable(init_filters(size=[1,1,f1,f2], init='alexnet'), dtype=tf.float32, name=name+'_conv_pw')

    conv1 = tf.nn.depthwise_conv2d(x, filters1, [1,p,p,1], 'SAME')
    bn1   = batch_norm(conv1, f1, name+'_bn_dw')
    relu1 = tf.nn.relu(bn1)

    conv2 = tf.nn.conv2d(relu1, filters2, [1,1,1,1], 'SAME')
    bn2   = batch_norm(conv2, f2, name+'_bn_pw')
    relu2 = tf.nn.relu(bn2)

    return relu2
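
A note on Example #1: this is a depthwise-separable (MobileNet-style) block, a 3x3 depthwise convolution followed by a 1x1 pointwise convolution, each with batch normalization and ReLU. The snippets on this page assume a batch_norm helper defined elsewhere in the project; a minimal stand-in, written only so the blocks can be run in isolation (the gamma/beta variables and the optional vars_dict argument are assumptions, not the project's code), could look like this:

import numpy as np
import tensorflow as tf

# Hypothetical stand-in for the project's batch_norm helper (assumption):
# per-channel normalization over the batch/height/width axes with a learnable
# scale (gamma) and offset (beta). The optional vars_dict mirrors the
# batch_norm(conv, fo, name, vars_dict) call site in Example #8.
def batch_norm(x, f, name='', vars_dict=None):
    gamma = tf.Variable(np.ones(shape=f), dtype=tf.float32)
    beta  = tf.Variable(np.zeros(shape=f), dtype=tf.float32)
    if vars_dict is not None:
        vars_dict[name + '_gamma'] = gamma
        vars_dict[name + '_beta'] = beta
    mean, var = tf.nn.moments(x, axes=[0, 1, 2])
    return tf.nn.batch_normalization(x, mean, var, beta, gamma, variance_epsilon=1e-5)
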
Example #2
def block(x, f1, f2, p, name):
    filters1 = tf.Variable(init_filters(size=[3,3,f1,f2], init='alexnet'), dtype=tf.float32, name=name+'_conv1')
    filters2 = tf.Variable(init_filters(size=[3,3,f2,f2], init='alexnet'), dtype=tf.float32, name=name+'_conv2')

    conv1 = tf.nn.conv2d(x, filters1, [1,1,1,1], 'SAME')
    bn1   = batch_norm(conv1, f2, name+'_bn1')
    relu1 = tf.nn.relu(bn1)

    conv2 = tf.nn.conv2d(relu1, filters2, [1,1,1,1], 'SAME')
    bn2   = batch_norm(conv2, f2, name+'_bn2')
    relu2 = tf.nn.relu(bn2)

    pool = tf.nn.avg_pool(relu2, ksize=[1,p,p,1], strides=[1,p,p,1], padding='SAME')

    return pool
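
Every example here also relies on an init_filters(size, init) helper that returns a numpy array of the given filter shape. A plausible sketch, assuming 'alexnet' means the small zero-mean Gaussian initialization from the AlexNet paper and 'glorot_uniform' the usual Glorot/Xavier fan-in/fan-out rule (both are guesses about the project's actual helper):

import numpy as np

# Hypothetical sketch of init_filters (assumption, not the project's source).
def init_filters(size, init):
    fh, fw, fi, fo = size
    if init == 'alexnet':
        # AlexNet-style: zero-mean Gaussian with a small fixed std.
        return np.random.normal(loc=0.0, scale=0.01, size=size)
    elif init == 'glorot_uniform':
        # Glorot/Xavier uniform: limit = sqrt(6 / (fan_in + fan_out)).
        fan_in = fh * fw * fi
        fan_out = fh * fw * fo
        limit = np.sqrt(6.0 / (fan_in + fan_out))
        return np.random.uniform(low=-limit, high=limit, size=size)
    else:
        raise ValueError('unknown init: %s' % init)
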
Example #3
def conv(x, f, p, w, name):
    fw, fh, fi, fo = f

    trainable = (w is None)

    if w is not None:
        print('loading %s | trainable %d ' % (name, trainable))
        filters_np = w[name]
        bias_np = w[name + '_bias']
    else:
        print('making %s | trainable %d ' % (name, trainable))
        filters_np = init_filters(size=[fw, fh, fi, fo], init='glorot_uniform')
        bias_np = np.zeros(shape=fo)

    if not (tuple(np.shape(filters_np)) == tuple(f)):
        print(np.shape(filters_np), f)
        assert (tuple(np.shape(filters_np)) == tuple(f))

    filters = tf.Variable(filters_np, dtype=tf.float32, trainable=trainable)
    bias = tf.Variable(bias_np, dtype=tf.float32, trainable=trainable)

    conv = tf.nn.conv2d(x, filters, [1, p, p, 1], 'SAME') + bias
    relu = tf.nn.leaky_relu(conv, 0.1)

    return relu
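
Example #3's conv either creates fresh trainable filters or loads frozen ones from a weight dictionary keyed by name and name + '_bias'. A short usage sketch (shapes and the pretrained dict are made up for illustration; assumes eager execution plus the numpy/TensorFlow imports and the init_filters sketch above):

x = tf.zeros([1, 32, 32, 3])

# Fresh, trainable filters:
y1 = conv(x, (3, 3, 3, 64), 1, None, 'conv1')

# Frozen filters loaded from a (hypothetical) pretrained dict:
pretrained = {'conv1': np.random.randn(3, 3, 3, 64), 'conv1_bias': np.zeros(64)}
y2 = conv(x, (3, 3, 3, 64), 1, pretrained, 'conv1')
print(y1.shape, y2.shape)  # (1, 32, 32, 64) for both
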
Example #4
def block(x, f1, f2, p):
    filters = tf.Variable(init_filters(size=[3,3,f1,f2], init='alexnet'), dtype=tf.float32)

    conv = tf.nn.conv2d(x, filters, [1,1,1,1], 'SAME')
    bn   = batch_norm(conv, f2)
    relu = tf.nn.relu(bn)
    pool = tf.nn.avg_pool(relu, ksize=[1,p,p,1], strides=[1,p,p,1], padding='SAME')
    return pool
Example #5
File: layers.py  Project: bcrafton/quantize
    def __init__(self, f1, f2, p):
        self.layer_id = layer.layer_id
        layer.layer_id += 1

        self.f1 = f1
        self.f2 = f2
        self.p = p
        self.f = tf.Variable(init_filters(size=[3, 3, self.f1, self.f2],
                                          init='glorot_uniform'),
                             dtype=tf.float32)
        self.b = tf.Variable(np.zeros(shape=(self.f2)),
                             dtype=tf.float32,
                             trainable=False)
Example #6
def conv(x, f, s, weights, name):
    _, _, _, nfilters = f
    if weights:
        filters_np = weights[name]
        bias_np = weights[name + '_bias']
        assert (np.shape(filters_np) == tuple(f))
        assert (np.shape(bias_np)[0] == nfilters)
        filters = tf.Variable(filters_np, dtype=tf.float32)
        bias = tf.Variable(bias_np, dtype=tf.float32)
    else:
        filters = tf.Variable(init_filters(size=f, init='glorot_uniform'),
                              dtype=tf.float32)
        bias = tf.Variable(np.zeros(nfilters), dtype=tf.float32)

    conv = tf.nn.conv2d(x, filters, [1, s, s, 1], 'VALID') + bias
    relu = tf.nn.relu(conv)
    return relu
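
Unlike the other examples, Example #6 uses 'VALID' padding, so the spatial size shrinks to floor((H - fh) / s) + 1. A quick shape check with AlexNet-like numbers (illustrative only; the weights=None path assumes an init_filters helper such as the sketch above):

x = tf.zeros([1, 227, 227, 3])
y = conv(x, (11, 11, 3, 96), 4, None, 'conv1')
print(y.shape)  # (1, 55, 55, 96): floor((227 - 11) / 4) + 1 = 55
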
Example #7
    def __init__(self, shape, relu=True):
        self.layer_id = layer.layer_id
        layer.layer_id += 1

        self.k, _, self.f1, self.f2 = shape
        self.relu = relu

        self.f = tf.Variable(init_filters(
            size=[self.k, self.k, self.f1, self.f2], init='glorot_uniform'),
                             dtype=tf.float32,
                             name='f_%d' % (self.layer_id))
        self.b = tf.Variable(np.zeros(shape=(self.f2)),
                             dtype=tf.float32,
                             name='b_%d' % (self.layer_id))
        self.g = tf.Variable(np.ones(shape=(self.f2)),
                             dtype=tf.float32,
                             name='g_%d' % (self.layer_id))
Example #8
def conv_block(x, f, name, vars_dict):

    if name in vars_dict:
        filters = tf.Variable(vars_dict[name],
                              trainable=False,
                              dtype=tf.float32)
    else:
        filters = tf.Variable(init_filters(size=f, init='alexnet'),
                              dtype=tf.float32)

    vars_dict[name] = filters

    ##################################

    fh, fw, fc, fo = f

    conv = tf.nn.conv2d(x, filters, [1, 1, 1, 1], 'SAME')
    bn = batch_norm(conv, fo, name, vars_dict)
    relu = tf.nn.relu(bn)

    return relu
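
Example #8 threads a vars_dict through the block: filters created on the first call are stored under their name, and a later call with the same name gets a non-trainable copy initialized from them. A usage sketch (assumes a batch_norm that accepts the extra vars_dict argument, as in the stand-in above):

vars_dict = {}
x = tf.zeros([1, 28, 28, 16])

y1 = conv_block(x, (3, 3, 16, 32), 'block1', vars_dict)  # creates 'block1' filters
y2 = conv_block(x, (3, 3, 16, 32), 'block1', vars_dict)  # frozen copy initialized from them
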
Example #9
def block(x, f1, f2, p, name):
    filters = tf.Variable(init_filters(size=[3,3,f1,f2], init='alexnet'), dtype=tf.float32, name=name+'_conv')
    conv = tf.nn.conv2d(x, filters, [1,p,p,1], 'SAME')
    bn   = batch_norm(conv, f2, name+'_bn')
    relu = tf.nn.relu(bn)
    return relu
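
Example #9 folds the downsampling into the convolution stride rather than using a pooling layer. Stacking a couple of these blocks (shapes are illustrative; assumes the batch_norm stand-in above):

x = tf.zeros([1, 32, 32, 3])
h = block(x, 3, 64, 2, 'block1')     # -> (1, 16, 16, 64)
h = block(h, 64, 128, 2, 'block2')   # -> (1, 8, 8, 128)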