Example #1
    def __init__(self):

        # ... data pre-processing elided in this excerpt ...
        print('data pre-processing is done')

        self.x = tf.placeholder(shape=[self.batch_size, self.word_size],
                                dtype=tf.int32)  # integer token ids per example
        self.y_ = tf.placeholder(shape=[self.batch_size], dtype=tf.float32)  # binary targets

        # embedding table: 566 vocabulary entries plus, presumably, 2 special ids
        embeddings = tf.Variable(
            tf.random_uniform([566 + 2, self.embedding_size], -1.0, 1.0))
        embed = tf.nn.embedding_lookup(embeddings, self.x)
        X = tf.reshape(
            embed, [self.batch_size, self.word_size, self.embedding_size, 1])

        c1 = ml.conv2d(X,
                       conv_filter=[10, self.embedding_size, 1, 2],
                       padding='VALID',
                       ksize=[1, 10, 1, 1],
                       pool_stride=[1, 4, 1, 1],
                       pool_padding='SAME')
        c2 = ml.conv2d(c1,
                       conv_filter=[4, 1, 2, 4],
                       padding='SAME',
                       ksize=[1, 10, 1, 1],
                       pool_stride=[1, 5, 1, 1],
                       pool_padding='SAME')
        c3 = ml.conv2d(c2,
                       conv_filter=[5, 1, 4, 8],
                       padding='VALID',
                       ksize=[1, 1, 1, 1],
                       pool_stride=[1, 1, 1, 1],
                       pool_padding='VALID')

        out = tf.reshape(c3, shape=[self.batch_size, 8])
        self.y = tf.nn.sigmoid(ml.layer_basic(out, 1))[:, 0]
        # ... further code elided in this excerpt ...
        self.sess = tf.Session()
        saver = tf.train.Saver()
        saver.restore(
            self.sess,
            '/home/liangoy/Desktop/project/xingqiao_model/msgTfidf566')
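All of these examples lean on `util.ml.conv2d`, whose implementation is not shown. Judging from its arguments (`conv_filter`, `padding`, `ksize`, `pool_stride`, `pool_padding`, and an optional `nn` activation), it plausibly bundles a convolution, a bias, an activation, and a max pool. A minimal sketch of such a wrapper, with all defaults assumed rather than taken from the source:

import tensorflow as tf

def conv2d(x, conv_filter, padding='SAME', ksize=None,
           pool_stride=None, pool_padding='SAME', nn=tf.nn.relu):
    """Hypothetical reconstruction of util.ml.conv2d: conv -> bias -> activation -> pool."""
    w = tf.Variable(tf.random_uniform(conv_filter, -1.0, 1.0))
    b = tf.Variable(tf.random_uniform(conv_filter[-1:], -1.0, 1.0))
    h = nn(tf.nn.conv2d(x, filter=w, strides=[1, 1, 1, 1], padding=padding) + b)
    if ksize is not None:
        h = tf.nn.max_pool(h, ksize=ksize,
                           strides=pool_stride or ksize, padding=pool_padding)
    return h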
Example #2
# positive training rows (label stored in column 0); the last batch is held out
train_data_t = np.array([i for i in data[:-1 * batch_size] if i[0] == 1])


def next(batch_size=batch_size):
    # sample a balanced batch: half positive rows, half negative rows
    r_t = np.random.randint(0, len(train_data_t), int(batch_size / 2))
    r_f = np.random.randint(0, len(train_data_f), int(batch_size / 2))
    data = np.concatenate([train_data_t[r_t], train_data_f[r_f]])
    return data[:, 1:], data[:, 0]  # features, labels


x = tf.placeholder(shape=[batch_size, score_size], dtype=tf.float32)
y_ = tf.placeholder(shape=[batch_size], dtype=tf.float32)

X = tf.reshape(x, [batch_size, score_size, 1, 1])
c1 = ml.conv2d(X,
               conv_filter=[4, 1, 1, 2],
               ksize=[1, 11, 1, 1],
               pool_stride=[1, 10, 1, 1])
c2 = ml.conv2d(c1,
               conv_filter=[4, 1, 2, 4],
               ksize=[1, 30, 1, 1],
               pool_stride=[1, 20, 1, 1])
c3 = ml.conv2d(c2,
               conv_filter=[5, 1, 4, 8],
               padding='VALID',
               ksize=[1, 1, 1, 1],
               pool_stride=[1, 1, 1, 1])

c_out = tf.reshape(c3, [batch_size, 8])
lay2 = ml.layer_basic(c_out, 1)

y = tf.nn.sigmoid(lay2[:, 0])
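The snippet stops at the sigmoid output. A plausible continuation under TF1 semantics, pairing it with a binary cross-entropy loss and the balanced `next()` sampler above (loss form, learning rate, and step count are assumptions, not from the source):

loss = -tf.reduce_mean(y_ * tf.log(y + 0.0001) +
                       (1 - y_) * tf.log(1 - y + 0.0001))
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1000):
        batch_x, batch_y = next()  # half positive, half negative rows
        sess.run(optimizer, feed_dict={x: batch_x, y_: batch_y})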
Example #3
        # sliding window of length `long`: features are the first 10 columns of all
        # but the last row, plus the last row's first value broadcast as an 11th column
        sample = data[i:i + long]
        a.append(
            np.concatenate([sample[:-1, :10], [[sample[-1][0]]] * (long - 1)],
                           axis=-1))
        b.append(sample[-1][otype])  # target comes from the window's final row
    return a, b


x = tf.placeholder(shape=[batch_size, long - 1, 11], dtype=tf.float16)
y_ = tf.placeholder(shape=[batch_size], dtype=tf.float16)

X = tf.reshape(tf.nn.tanh(x), [batch_size, long - 1, x.shape[-1], 1])

c1 = ml.conv2d(X,
               conv_filter=[1, x.shape[-1], 1, 4],
               padding='VALID',
               ksize=[1, 1, 1, 1],
               pool_padding='VALID',
               nn=tf.nn.tanh)
c2 = ml.conv2d(c1,
               conv_filter=[4, 1, 4, 6],
               padding='SAME',
               ksize=[1, 6, 1, 1],
               pool_stride=[1, 5, 1, 1],
               pool_padding='SAME',
               nn=tf.nn.tanh)
c3 = ml.conv2d(c2,
               conv_filter=[3, 1, 6, 8],
               padding='SAME',
               ksize=[1, 6, 1, 1],
               pool_stride=[1, 6, 1, 1],
               pool_padding='VALID',
               nn=tf.nn.tanh)  # closing argument assumed from the c1/c2 pattern; the excerpt was cut off mid-call
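The excerpt breaks off after the third block. The other snippets in this collection finish with a flatten plus a one-unit `ml.layer_basic` head, so a mirrored continuation (assumed, not recovered from the source) would look like:

c_out = tf.reshape(c3, [batch_size, -1])           # flatten whatever spatial extent remains
y = tf.nn.sigmoid(ml.layer_basic(c_out, 1))[:, 0]  # one scalar prediction per window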
Example #4

# the column named `word_size` holds the label; the remaining columns are token ids
x_test, y_test = np.array(test_data.drop(word_size, axis=1)), np.array(
    test_data[word_size])

x = tf.placeholder(shape=[batch_size, word_size], dtype=tf.int32)
y_ = tf.placeholder(shape=[batch_size], dtype=tf.float32)

embeddings = tf.Variable(
    tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
# indices are shifted by one, presumably so an id of -1 (padding/unknown) maps to row 0
embed = tf.nn.embedding_lookup(embeddings, x + 1)
X = tf.reshape(embed, [batch_size, word_size, embedding_size, 1])

c1 = ml.conv2d(X,
               conv_filter=[3, embedding_size, 1, 2],
               padding='VALID',
               ksize=[1, 20, 1, 1],
               pool_stride=[1, 10, 1, 1],
               pool_padding='SAME')
c2 = ml.conv2d(c1,
               conv_filter=[4, 1, 2, 4],
               padding='SAME',
               ksize=[1, 10, 1, 1],
               pool_stride=[1, 10, 1, 1],
               pool_padding='SAME')
c3 = ml.conv2d(c2,
               conv_filter=[2, 1, 4, 8],
               padding='SAME',
               ksize=[1, 10, 1, 1],
               pool_stride=[1, 10, 1, 1],
               pool_padding='VALID')
# lay1 = tf.reshape(c2, [batch_size, -1])
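This snippet also ends before the classifier head (only a commented-out flatten remains). Mirroring Examples #1 and #2, a plausible finish plus a quick test-set forward pass (all of it assumed):

out = tf.reshape(c3, [batch_size, -1])
y = tf.nn.sigmoid(ml.layer_basic(out, 1))[:, 0]

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    pred = sess.run(y, feed_dict={x: x_test[:batch_size]})  # probabilities for one test batch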
Example #5
import tensorflow as tf
from util.image import generate_image
from util import ml

batch_size = 128

shape = [batch_size] + list(generate_image(1)[1][0].shape)
print(shape)

x = tf.placeholder(shape=shape, dtype=tf.float32)
y_ = tf.placeholder(shape=[batch_size], dtype=tf.int32)
training = tf.placeholder(dtype=tf.bool)

c1 = ml.conv2d(x,
               conv_filter=[3, 4, 3, 8],
               ksize=[1, 4, 5, 1],
               pool_stride=[1, 3, 4, 1])  # -> [20, 40]
c2 = ml.conv2d(c1,
               conv_filter=[3, 4, 8, 16],
               ksize=[1, 4, 5, 1],
               pool_stride=[1, 2, 4, 1])  # -> [10, 10]
c3 = ml.conv2d(c2,
               conv_filter=[3, 4, 16, 32],
               ksize=[1, 3, 3, 1],
               pool_stride=[1, 2, 2, 1])  # -> [5, 5]

w = tf.Variable(tf.random_uniform([5, 5, 32, 128], -1.0, 1.0))
b = tf.Variable(tf.random_uniform([128], -1.0, 1.0))
c4 = tf.nn.conv2d(c3, filter=w, strides=[1, 1, 1, 1], padding='VALID')

out = tf.reshape(c4, shape=[batch_size, 128])
y = tf.nn.softmax(
    ml.layer_basic(tf.layers.batch_normalization(out, axis=-1, training=training), 36))
loss = -tf.reduce_mean(
    tf.one_hot(y_, depth=36) * tf.log(y + 0.0001)) / batch_size / tf.log(2.0)

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)

y_out = tf.argmax(y, axis=1)
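The snippet defines the optimizer and `y_out` but no accuracy metric; a conventional addition (assumed, not from the source):

correct = tf.equal(y_out, tf.cast(y_, tf.int64))  # y_ is int32, tf.argmax returns int64
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))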
Example #6
print(shape)

x = tf.placeholder(shape=shape, dtype=tf.float32)
y_ = tf.placeholder(shape=[batch_size, 4], dtype=tf.int32)
training = tf.placeholder(dtype=tf.bool)

# fixed (non-trainable) normalization; axis=0 keeps the batch axis, so each
# example is normalized with statistics from its own pixels
X = tf.layers.batch_normalization(x,
                                  training=True,
                                  trainable=False,
                                  scale=False,
                                  center=False,
                                  axis=0)

c1 = ml.conv2d(tf.expand_dims(X, axis=-1),
               conv_filter=[5, 5, 1, 32],
               ksize=[1, 3, 4, 1],
               pool_stride=[1, 3, 4, 1],
               nn=tf.nn.relu)  # -> [20, 40]
c2 = ml.conv2d(c1,
               conv_filter=[3, 4, 32, 64],
               ksize=[1, 2, 4, 1],
               pool_stride=[1, 2, 4, 1],
               nn=tf.nn.relu)  # -> [10, 10]
c3 = ml.conv2d(c2,
               conv_filter=[3, 4, 64, 128],
               ksize=[1, 2, 2, 1],
               pool_stride=[1, 2, 2, 1],
               nn=tf.nn.relu)  # -> [5, 5]

w = tf.Variable(tf.random_uniform([5, 5, 128, 256], -1.0, 1.0))
#b = tf.Variable(tf.random_uniform([512], -1.0, 1.0))
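The excerpt stops after declaring `w`. Examples #5 and #8 apply the analogous weight with a raw `tf.nn.conv2d` and flatten the result, so the next lines were plausibly:

# assumed continuation, mirroring Examples #5 and #8
c4 = tf.nn.conv2d(c3, filter=w, strides=[1, 1, 1, 1], padding='VALID')
out = tf.reshape(c4, shape=[batch_size, 256])  # 5x5 VALID conv collapses the spatial dims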
Example #7
        # (excerpt begins mid-function) collect the selected training pairs
        xx.append(train_data_x[i])
        yy.append(train_data_y[i])
    return xx, yy


x_test, y_test = test_data_x[:batch_size], test_data_y[:batch_size]

x = tf.placeholder(shape=[batch_size, msg_count, msg_size], dtype=tf.int32)
y_ = tf.placeholder(shape=[batch_size], dtype=tf.float32)

embeddings = tf.constant(embeddings)  # freeze the pretrained embedding matrix
embed = tf.nn.embedding_lookup(embeddings, x)

c1 = ml.conv2d(embed,
               conv_filter=[1, 4, embedding_size, 1],
               padding='VALID',
               ksize=[1, 100, 10, 1],
               pool_stride=[1, 100, 10, 1],
               pool_padding='VALID')
# c2 = ml.conv2d(c1, conv_filter=[4, 4, 1, 1], padding='VALID', ksize=[1, 20, 5, 1],
#                pool_stride=[1, 10, 2, 1],
#                pool_padding='VALID')
c3 = ml.conv2d(c1,
               conv_filter=[int(c1.shape[1]),
                            int(c1.shape[2]), 1, 1],
               padding='VALID',
               ksize=[1, 1, 1, 1],
               pool_stride=[1, 1, 1, 1],
               pool_padding='VALID')

y = tf.nn.sigmoid(ml.layer_basic(c3[:, 0, 0]))[:, 0]
#gv = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
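Unlike Examples #1 and #4, which learn their embedding table, this snippet wraps pretrained vectors in `tf.constant`, so they stay fixed during training. If fine-tuning were wanted instead, the table could be made a variable (illustrative only, not from the source):

# hypothetical alternative: trainable embeddings initialized from the pretrained matrix
embeddings = tf.Variable(initial_value=embeddings, dtype=tf.float32)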
Example #8
shape = [batch_size] + list(generate_number_image(1)[1][0].shape)
print(shape)

x = tf.placeholder(shape=shape, dtype=tf.float32)
y_ = tf.placeholder(shape=[batch_size, 4], dtype=tf.int32)
training = tf.placeholder(dtype=tf.bool)

# per-image mean over the 30x80 grid; keepdims lets broadcasting replace the
# original tile/reshape, whose repeat pattern did not line up per image
x_mean = tf.reduce_mean(x, axis=[1, 2], keepdims=True)

# keep only pixels more than 30 units from the image mean, then squash the
# result to an (almost) binary mask
X = tf.nn.relu(tf.abs(x - x_mean) - 30)
XX = X / (X + 0.0001)

c1 = ml.conv2d(tf.expand_dims(XX, axis=-1),
               conv_filter=[5, 5, 1, 16],
               ksize=[1, 3, 4, 1],
               pool_stride=[1, 3, 4, 1],
               nn=tf.nn.relu)  # -> [10, 20]
c2 = ml.conv2d(c1,
               conv_filter=[3, 4, 16, 32],
               ksize=[1, 2, 4, 1],
               pool_stride=[1, 2, 4, 1],
               nn=tf.nn.relu)  # -> [5, 5]

w = tf.Variable(tf.random_uniform([5, 5, 32, 128], -1.0, 1.0))
#b = tf.Variable(tf.random_uniform([512], -1.0, 1.0))
c3 = tf.nn.conv2d(c2, filter=w, strides=[1, 1, 1, 1], padding='VALID')

out = tf.nn.relu(
    tf.layers.batch_normalization(tf.reshape(c3, shape=[batch_size, 128]),
                                  training=training))
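The snippet ends at the normalized feature vector. With `y_` holding four digit labels per image, one plausible head (sizes and loss are assumptions) is four parallel 10-way softmaxes:

logits = tf.reshape(ml.layer_basic(out, 4 * 10),  # hypothetical head: 4 digits x 10 classes
                    [batch_size, 4, 10])
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_, logits=logits))
y_out = tf.argmax(logits, axis=-1)  # predicted digit per position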