Example 1
def dense_block(x, size):
    input_size, output_size = size

    w = tf.Variable(init_matrix(size=size, init='alexnet'), dtype=tf.float32)
    b  = tf.Variable(np.zeros(shape=output_size), dtype=tf.float32)

    fc = tf.matmul(x, w) + b
    return fc
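
Every example on this page calls an init_matrix helper that the page itself does not show. Below is a minimal sketch of what it presumably does, inferred only from the 'alexnet' and 'glorot_uniform' init names used here; the exact distributions are assumptions.

import numpy as np

def init_matrix(size, init):
    # hypothetical reconstruction; the real helper is not shown in the source
    input_size, output_size = size
    if init == 'glorot_uniform':
        # Glorot/Xavier uniform: U(-limit, limit) with limit = sqrt(6 / (fan_in + fan_out))
        limit = np.sqrt(6.0 / (input_size + output_size))
        return np.random.uniform(low=-limit, high=limit, size=size)
    elif init == 'alexnet':
        # AlexNet-style Gaussian init with std 0.01 (assumption)
        return np.random.normal(loc=0.0, scale=0.01, size=size)
    else:
        raise ValueError('unknown init: %s' % init)
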
Example 2
def dense(x, size, name):
    input_size, output_size = size
    w = tf.Variable(init_matrix(size=size, init='alexnet'),
                    dtype=tf.float32,
                    name=name)
    b = tf.Variable(np.zeros(shape=output_size),
                    dtype=tf.float32,
                    name=name + '_bias')
    fc = tf.matmul(x, w) + b
    return fc
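
For context, a short usage sketch of this helper; the placeholder shape and layer sizes below are assumptions, not taken from the source.

x = tf.placeholder(tf.float32, shape=(None, 784))
h = tf.nn.relu(dense(x, size=(784, 256), name='fc1'))
logits = dense(h, size=(256, 10), name='fc2')
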
Example 3
class layer:
    layer_id = 0  # class-level counter used to hand out unique layer ids

    def __init__(self, isize, osize):
        self.layer_id = layer.layer_id
        layer.layer_id += 1

        self.isize = isize
        self.osize = osize
        self.w = tf.Variable(init_matrix(size=(self.isize, self.osize),
                                         init='glorot_uniform'),
                             dtype=tf.float32)
        self.b = tf.Variable(np.zeros(shape=self.osize),
                             dtype=tf.float32,
                             trainable=False)
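
The excerpt shows only the constructor; a hedged sketch of how such a layer object might be applied (the forward pass and the flat_input tensor are assumptions, not part of the example).

fc = layer(isize=1024, osize=10)
logits = tf.matmul(flat_input, fc.w) + fc.b  # flat_input: assumed [batch, 1024] tensor
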
Example 4
def dense(x, size, weights, name):
    _, output_size = size
    if weights:
        # pretrained weights supplied: reuse them, checking shapes first
        w_np = weights[name]
        b_np = weights[name + '_bias']
        assert np.shape(w_np) == size
        assert np.shape(b_np)[0] == output_size
        w = tf.Variable(w_np, dtype=tf.float32)
        b = tf.Variable(b_np, dtype=tf.float32)
    else:
        w = tf.Variable(init_matrix(size=size, init='glorot_uniform'),
                        dtype=tf.float32)
        b = tf.Variable(np.zeros(output_size), dtype=tf.float32)

    dot = tf.matmul(x, w) + b
    return dot
Example 5
def dense(x, size, w, name):
    input_size, output_size = size

    # freshly initialized weights stay trainable; loaded weights are frozen
    trainable = (w is None)

    if w is not None:
        print('loading %s | trainable %d ' % (name, trainable))
        weights_np = w[name]
        bias_np = w[name + '_bias']
    else:
        print('making %s | trainable %d ' % (name, trainable))
        weights_np = init_matrix(size=size, init='glorot_uniform')
        bias_np = np.zeros(shape=output_size)

    w = tf.Variable(weights_np, dtype=tf.float32, trainable=trainable)
    b = tf.Variable(bias_np, dtype=tf.float32, trainable=trainable)

    out = tf.matmul(x, w) + b
    return out
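
Both variants of this example expect the pretrained weights as a plain dict of NumPy arrays keyed by '<name>' and '<name>_bias'. A hedged loading sketch using the second variant's signature; the filename is hypothetical.

weights = np.load('pretrained_weights.npy', allow_pickle=True).item()
fc1 = dense(x, size=(1024, 1000), w=weights, name='fc1')  # loaded, frozen
fc2 = dense(fc1, size=(1000, 10), w=None, name='fc2')     # fresh, trainable
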
###############################################################

dropout_rate = tf.placeholder(tf.float32, shape=())
learning_rate = tf.placeholder(tf.float32, shape=())

block1 = block(features, 3,  64,   2, 'block1')                                      # 64
block2 = block(block1,   64,  128,  2, 'block2')                                     # 32
block3 = block(block2,   128, 256,  2, 'block3')                                     # 16
block4 = block(block3,   256, 512,  2, 'block4')                                     # 8
block5 = block(block4,   512, 1024, 1, 'block5')                                     # 4
pool   = tf.nn.avg_pool(block5, ksize=[1,4,4,1], strides=[1,4,4,1], padding='SAME')  # 1

flat   = tf.reshape(pool, [args.batch_size, 1024])

mat1   = tf.Variable(init_matrix(size=(1024, 1000), init='alexnet'), dtype=tf.float32, name='fc1')
bias1  = tf.Variable(np.zeros(shape=1000), dtype=tf.float32, name='fc1_bias')
fc1    = tf.matmul(flat, mat1) + bias1
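
Note that the dropout_rate placeholder declared above is never used in this excerpt; presumably it feeds a dropout op inside block() or on flat. A hedged sketch of the latter, written with keep_prob for compatibility across TF 1.x releases:

flat_drop = tf.nn.dropout(flat, keep_prob=1.0 - dropout_rate)  # hypothetical; flat_drop would replace flat in fc1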

###############################################################

loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=fc1, labels=labels))
correct = tf.equal(tf.argmax(fc1, axis=1), tf.argmax(labels, axis=1))
total_correct = tf.reduce_sum(tf.cast(correct, tf.float32))

train = tf.train.AdamOptimizer(learning_rate=learning_rate, epsilon=args.eps).minimize(loss)

###############################################################

config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
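
A minimal sketch of how this graph might be driven; the session loop, feed values, and args fields below are assumptions, and features/labels are presumed to come from an input pipeline defined elsewhere.

with tf.Session(config=config) as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(args.epochs):  # args.epochs is hypothetical
        _, acc = sess.run([train, total_correct],
                          feed_dict={learning_rate: args.lr,  # args.lr is hypothetical
                                     dropout_rate: 0.5})
        print('epoch %d | correct %f' % (epoch, acc))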