Example #1
    def __init__(self):

        # ..........................................................................
        print('data pre-processing is done')

        self.x = tf.placeholder(shape=[self.batch_size, self.word_size],
                                dtype=tf.int32)
        self.y_ = tf.placeholder(shape=[self.batch_size], dtype=tf.float32)

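        # embedding table with 566 + 2 rows (the extra two ids are likely padding/unknown tokens);
        # the lookup is reshaped below to [batch, word_size, embedding_size, 1] so the text can be
        # fed to 2-D conv/pool blocks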
        embeddings = tf.Variable(
            tf.random_uniform([566 + 2, self.embedding_size], -1.0, 1.0))
        embed = tf.nn.embedding_lookup(embeddings, self.x)
        X = tf.reshape(
            embed, [self.batch_size, self.word_size, self.embedding_size, 1])

        c1 = ml.conv2d(X,
                       conv_filter=[10, self.embedding_size, 1, 2],
                       padding='VALID',
                       ksize=[1, 10, 1, 1],
                       pool_stride=[1, 4, 1, 1],
                       pool_padding='SAME')
        c2 = ml.conv2d(c1,
                       conv_filter=[4, 1, 2, 4],
                       padding='SAME',
                       ksize=[1, 10, 1, 1],
                       pool_stride=[1, 5, 1, 1],
                       pool_padding='SAME')
        c3 = ml.conv2d(c2,
                       conv_filter=[5, 1, 4, 8],
                       padding='VALID',
                       ksize=[1, 1, 1, 1],
                       pool_stride=[1, 1, 1, 1],
                       pool_padding='VALID')

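        # after the three conv/pool blocks the features are flattened to [batch_size, 8] for the logistic output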
        out = tf.reshape(c3, shape=[self.batch_size, 8])
        self.y = tf.nn.sigmoid(ml.layer_basic(out, 1))[:, 0]
        # ...................................................................
        self.sess = tf.Session()
        saver = tf.train.Saver()
        saver.restore(
            self.sess,
            '/home/liangoy/Desktop/project/xingqiao_model/msgTfidf566')
Example #2
        (cell_output_x, state_x) = gru_x(X[:, timestep], state_x)
    out_put_x = state_x

gru_y = GRUCell(num_units=8, reuse=tf.AUTO_REUSE, activation=tf.nn.elu)
state_y = gru_y.zero_state(batch_size, dtype=tf.float16)
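# unroll the GRU step by step; reuse_variables() after the first step shares the cell weights across timesteps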
with tf.variable_scope('RNN_y'):
    for timestep in range(long):  # be careful
        if timestep == 1:
            tf.get_variable_scope().reuse_variables()
        (cell_output_y, state_y) = gru_y(Y[:, timestep], state_y)
    out_put_y = state_y

#out_put = tf.concat([out_put_x, out_put_y], axis=1)
out_put = out_put_x

lay1 = tf.nn.tanh(ml.layer_basic(out_put, 4))
z = ml.layer_basic(lay1, 1)[:, 0]

loss = tf.reduce_mean((z - z_)**2)

optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
optimizer_min = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(loss)
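# two Adam ops with different learning rates; the 10x smaller step is presumably meant for a later fine-tuning phase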

# ...................................................................
sess = tf.Session()
sess.run(tf.global_variables_initializer())

print('begin..................................')

for i in range(10**10):
    a, b, c = next(data=data_train)
Example #3
    for timestep in range(long - 1):
        if timestep == 1:
            tf.get_variable_scope().reuse_variables()
        (cell_output_x, state_x) = gru_x(X[:, timestep], state_x)
    out_put_x = state_x

gru_y = GRUCell(num_units=8, reuse=tf.AUTO_REUSE, activation=tf.nn.elu)
state_y = gru_y.zero_state(batch_size, dtype=tf.float16)
with tf.variable_scope('RNN_y'):
    for timestep in range(long):
        if timestep == 1:
            tf.get_variable_scope().reuse_variables()
        (cell_output_y, state_y) = gru_y(Y[:, timestep], state_y)
    out_put_y = state_y

lay1_x = ml.layer_basic(out_put_x, 1)
lay1_y = ml.layer_basic(out_put_y, 1)

lay1 = tf.concat([lay1_x, lay1_y], axis=1)
lay2 = tf.nn.elu(ml.layer_basic(lay1, 4))
z = ml.layer_basic(lay2, 1)[:, 0]

loss = tf.reduce_mean((z - z_) ** 2)

optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
optimizer_min = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(loss)

# ...................................................................
sess = tf.Session()
sess.run(tf.global_variables_initializer())
Example #4
        (cell_output_x, state_x) = gru(x[:, timestep], state_x)
    out_put_x = state_x

state_y = gru.zero_state(batch_size, dtype=tf.float16)
with tf.variable_scope('RNN_y'):
    for timestep in range(long):
        if timestep == 1:
            tf.get_variable_scope().reuse_variables()
        (cell_output_y, state_y) = gru(y[:, timestep], state_y)
    out_put_y = state_y

out_put = tf.concat([out_put_x, out_put_y], axis=1)

lay1 = out_put_y + out_put_x

z = tf.nn.sigmoid(ml.layer_basic(lay1, 1)[:, 0])

loss = tf.reduce_sum(-z_ * tf.log(z + 0.000000001) - (1 - z_) *
                     tf.log(1 - z + 0.00000001)) / batch_size / tf.log(2.0)
gv = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
l2_loss = tf.contrib.layers.apply_regularization(
    tf.contrib.layers.l2_regularizer(0.1, scope=None), weights_list=gv)
all_loss = loss + l2_loss
optimizer = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(loss)
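# note: Adam minimizes `loss` here, so the L2 term collected in all_loss is not actually applied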

# ...................................................................
sess = tf.Session()
sess.run(tf.global_variables_initializer())

print('begin..................................')
Example #5
embeddings = tf.Variable(
    tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, X)

gru = GRUCell(num_units=8, reuse=tf.AUTO_REUSE, activation=tf.nn.elu)
state = gru.zero_state(batch_size, dtype=tf.float32)
lis = []
with tf.variable_scope('RNN'):
    for timestep in range(word_size):
        if timestep == 1:
            tf.get_variable_scope().reuse_variables()
        (cell_output, state) = gru(ml.bn_with_wb(embed[:, timestep]), state)
    out_put = state

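# the final GRU state out_put is the fixed-size representation fed to the classifier head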
lay1 = ml.layer_basic(out_put, 4)
lay2 = ml.layer_basic(out_put, 1)
y = tf.nn.sigmoid(lay2[:, 0])
loss = tf.reduce_sum(-y_ * tf.log(y + 0.000000001) - (1 - y_) * tf.log(1 - y + 0.00000001)) / batch_size / tf.log(2.0)
optimizer = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(loss)
# ...................................................................
sess = tf.Session()
sess.run(tf.global_variables_initializer())

print('begin..................................', sum(y_test) / len(y_test))

for i in range(10 ** 10):
    x_train, y_train = next()
    sess.run(optimizer, feed_dict={x: x_train, y_: y_train})

    if i % 10 == 0:
Example #6
            tf.get_variable_scope().reuse_variables()
        (cell_output_x, state_x) = gru_x(X[:, timestep], state_x)
    out_put_x = state_x

gru_y = GRUCell(num_units=8, reuse=tf.AUTO_REUSE, activation=tf.nn.elu)
state_y = gru_y.zero_state(batch_size, dtype=tf.float16)
with tf.variable_scope('RNN_y'):
    for timestep in range(long - 1):  # be careful
        if timestep == 1:
            tf.get_variable_scope().reuse_variables()
        (cell_output_y, state_y) = gru_y(Y[:, timestep], state_y)
    out_put_y = state_y

out_put = tf.concat([out_put_x, out_put_y], axis=1)

lay1 = ml.layer_basic(out_put, 4)
z = ml.layer_basic(lay1, 1)[:, 0]

loss = tf.reduce_mean((z - z_)**2)

optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
optimizer_min = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(loss)

# ...................................................................
sess = tf.Session()
sess.run(tf.global_variables_initializer())

print('begin..................................')

for i in range(10**10):
    a, b, c = next(data=data_train)
Example #7
c2 = ml.conv2d(c1,
               conv_filter=[4, 1, 2, 4],
               padding='SAME',
               ksize=[1, 10, 1, 1],
               pool_stride=[1, 10, 1, 1],
               pool_padding='SAME')
c3 = ml.conv2d(c2,
               conv_filter=[2, 1, 4, 8],
               padding='SAME',
               ksize=[1, 10, 1, 1],
               pool_stride=[1, 10, 1, 1],
               pool_padding='VALID')
# lay1 = tf.reshape(c2, [batch_size, -1])
# lay2 = ml.layer_basic(ml.bn(lay1), 1)
out = tf.reshape(c3, shape=[batch_size, 8])
y = tf.nn.sigmoid(ml.layer_basic(out))[:, 0]
loss = tf.reduce_sum(-y_ * tf.log(y + 0.000000001) - (1 - y_) *
                     tf.log(1 - y + 0.00000001)) / batch_size / tf.log(2.0)
gv = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
# loss=tf.reduce_mean((y-y_)**2)
l2_loss = tf.contrib.layers.apply_regularization(
    tf.contrib.layers.l2_regularizer(0.01, scope=None), weights_list=gv)
all_loss = loss + l2_loss
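# unlike most of the other snippets, Adam minimizes all_loss here, so the L2 penalty contributes to the gradients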
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(all_loss)
# ...................................................................
sess = tf.Session()
sess.run(tf.global_variables_initializer())

print('begin..................................', sum(y_test) / len(y_test))

for i in range(10**10):
Example #8
print(shape)

x = tf.placeholder(shape=shape, dtype=tf.float32)
y_ = tf.placeholder(shape=[batch_size], dtype=tf.int32)
training = tf.placeholder(dtype=tf.bool)

c1 = ml.conv2d(x, conv_filter=[3, 4, 3, 8], ksize=[1, 4, 5, 1], pool_stride=[1, 3, 4, 1])  # [20,40 ]
c2 = ml.conv2d(c1, conv_filter=[3, 4, 8, 16], ksize=[1, 4, 5, 1], pool_stride=[1, 2, 4, 1])  # [10,10]
c3 = ml.conv2d(c2, conv_filter=[3, 4, 16, 32], ksize=[1, 3, 3, 1], pool_stride=[1, 2, 2, 1])  # [5,5]

w = tf.Variable(tf.random_uniform([5, 5, 32, 128], -1.0, 1.0))
b = tf.Variable(tf.random_uniform([128], -1.0, 1.0))
c4 = tf.nn.conv2d(c3, filter=w, strides=[1, 1, 1, 1], padding='VALID')

out = tf.reshape(c4, shape=[batch_size, 128])
y = tf.nn.softmax(ml.layer_basic(tf.layers.batch_normalization(out, axis=-1, training=training), 36))
loss = -tf.reduce_mean(tf.one_hot(y_, depth=36) * tf.log(y + 0.0001)) / batch_size / tf.log(2.0)

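# run the batch-norm moving-statistics updates (UPDATE_OPS) together with every optimizer step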
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)

y_out = tf.argmax(y, axis=1)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

for i in range(10 ** 10):
    train_y, train_x = generate_image(batch_size)
    train_y = train_y[:, 1]
    sess.run(optimizer, feed_dict={x: train_x, y_: train_y, training: True})
Example #9
    r_t = np.random.randint(0, len(train_data_t), int(batch_size / 2))
    r_f = np.random.randint(0, len(train_data_f), int(batch_size / 2))
    data = np.concatenate([train_data_t[r_t], train_data_f[r_f]])
    return data[:, 1:], data[:, 0]


x_test, y_test = test_data[:, 1:], test_data[:, 0]
x = tf.placeholder(shape=[batch_size, text_size], dtype=tf.int32)
y_ = tf.placeholder(shape=[batch_size], dtype=tf.float32)

embeddings = tf.Variable(
    tf.random_uniform([566 + 3, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, x + 1)

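# bag-of-embeddings: sum the token vectors over the text, batch-normalize, then a one-unit logistic head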
lay1 = tf.nn.elu(ml.bn(tf.reduce_sum(embed, axis=1)))
lay2 = ml.layer_basic(lay1, 1)
y = tf.nn.sigmoid(lay2[:, 0])
loss = tf.reduce_sum(-y_ * tf.log(y + 0.000000001) - (1.0 - y_) *
                     tf.log(1.0 - y + 0.00000001)) / batch_size / tf.log(2.0)
gv = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
# loss=tf.reduce_mean((y-y_)**2)
l2_loss = tf.contrib.layers.apply_regularization(
    tf.contrib.layers.l2_regularizer(0.05, scope=None), weights_list=gv)
all_loss = loss + l2_loss
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
# ...................................................................
sess = tf.Session()
sess.run(tf.global_variables_initializer())

print('begin..................................', sum(y_test) / len(y_test))

for i in range(10 ** 10):
Example #10
train_data_f = np.array([i for i in data[:-1 * batch_size] if i[0] == 0])
train_data_t = np.array([i for i in data[:-1 * batch_size] if i[0] == 1])


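# next() samples half of each batch from the positive rows and half from the negative rows to keep classes balanced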
def next(batch_size=batch_size):
    r_t = np.random.randint(0, len(train_data_t), int(batch_size / 2))
    r_f = np.random.randint(0, len(train_data_f), int(batch_size / 2))
    data = np.concatenate([train_data_t[r_t], train_data_f[r_f]])
    return data[:, 1], data[:, 0]


x = tf.placeholder(shape=[batch_size], dtype=tf.float32)
y_ = tf.placeholder(shape=[batch_size], dtype=tf.float32)

X = tf.reshape(x, [batch_size, 1])
lay1 = tf.nn.elu(ml.layer_basic(X, 4))
lay2 = ml.layer_basic(lay1, 1)

y = tf.nn.sigmoid(lay2[:, 0])
loss = tf.reduce_sum(-y_ * tf.log(y + 0.000000001) - (1 - y_) *
                     tf.log(1 - y + 0.00000001)) / batch_size / tf.log(2.0)
gv = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
# loss=tf.reduce_mean((y-y_)**2)
l2_loss = tf.contrib.layers.apply_regularization(
    tf.contrib.layers.l2_regularizer(0.7, scope=None), weights_list=gv)
all_loss = loss + l2_loss
optimizer = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(loss)
# ...................................................................
sess = tf.Session()
sess.run(tf.global_variables_initializer())
Example #11
train_data, test_data = pd.DataFrame(data), pd.DataFrame(test_data)


def next():
    a = train_data.sample(batch_size)
    return np.array(a.drop(vec_size, axis=1)), np.array(a[vec_size])


x_test, y_test = np.array(test_data.drop(vec_size, axis=1)), np.array(
    test_data[vec_size])

x = tf.placeholder(shape=[batch_size, vec_size], dtype=tf.float32)
y_ = tf.placeholder(shape=[batch_size], dtype=tf.float32)

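# a single 4-unit hidden layer over batch-normalized inputs, followed by a logistic output unit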
lay1 = ml.layer_basic(ml.bn_with_wb(x), size=4)
y = tf.nn.sigmoid(ml.layer_basic(lay1, size=1)[:, 0])

loss = tf.reduce_sum(-y_ * tf.log(y + 0.000000001) - (1 - y_) *
                     tf.log(1 - y + 0.00000001)) / batch_size / tf.log(2.0)

optimizer = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(loss)

sess = tf.Session()

# ...................................................................
sess.run(tf.global_variables_initializer())

for i in range(10**10):
    x_train, y_train = next()
    sess.run(optimizer, feed_dict={x: x_train, y_: y_train})
Example #12
        (cell_output, state) = gru(XY[:, timestep], state)
    out_put = state

# ================================================================
gru_a = GRUCell(num_units=8, reuse=tf.AUTO_REUSE, activation=tf.nn.elu)
state_a = gru_a.zero_state(batch_size, dtype=tf.float16)
with tf.variable_scope('RNN_a'):
    for timestep in range(long - 1):
        if timestep == 1:
            tf.get_variable_scope().reuse_variables()
        (cell_output, state_a) = gru_a(XY[:, timestep], state_a)
    out_put_a = state_a

# ================================================================

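# a small tanh MLP (16 -> 8 -> 3) maps the recurrent state to a 3-dimensional regression output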
lay1 = tf.nn.tanh(ml.layer_basic(out_put, 16))
lay2 = tf.nn.tanh(ml.layer_basic(lay1, 8))
z = ml.layer_basic(lay2, 3)

loss = tf.reduce_mean((z - z_)**2)

optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)
optimizer_min = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(loss)

# ...................................................................
sess = tf.Session()
sess.run(tf.global_variables_initializer())

print('begin..................................')

for i in range(10**10):
Example #13
        if timestep == 1:
            tf.get_variable_scope().reuse_variables()
        (cell_output_x, state_x) = gru(x[:, timestep], state_x)
    out_put_x = state_x

state_y = gru.zero_state(batch_size, dtype=tf.float16)
with tf.variable_scope('RNN_y'):
    for timestep in range(long):
        if timestep == 1:
            tf.get_variable_scope().reuse_variables()
        (cell_output_y, state_y) = gru(y[:, timestep], state_y)
    out_put_y = state_y

out_put = tf.concat([out_put_x, out_put_y], axis=1)

lay1 = tf.nn.elu(ml.layer_basic(out_put, 4))
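# the MLP output is added to feature 4 of the last timestep, so the head effectively learns a residual on top of that value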
z = ml.layer_basic(lay1, 1)[:, 0] + x[:, -1, 4]

loss = tf.reduce_mean((z - z_)**2)

optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)

# ...................................................................
sess = tf.Session()
sess.run(tf.global_variables_initializer())

print('begin..................................')

for i in range(10**10):
    a, b, c = next()
    sess.run(optimizer, feed_dict={x: a, y: b, z_: c})
Example #14
            tf.get_variable_scope().reuse_variables()
        (cell_output_x_low, state_x_low) = gru_x_low(X[:, timestep],
                                                     state_x_low)
    out_put_x_low = state_x_low

gru_x_close = GRUCell(num_units=8, reuse=tf.AUTO_REUSE, activation=tf.nn.elu)
state_x_close = gru_x_close.zero_state(batch_size, dtype=tf.float16)
with tf.variable_scope('RNN_x_close'):
    for timestep in range(long - 1):
        if timestep == 1:
            tf.get_variable_scope().reuse_variables()
        (cell_output_x_close,
         state_x_close) = gru_x_close(X[:, timestep], state_x_close)
    out_put_x_close = state_x_close

z_open = ml.layer_basic(out_put_x_open, 1)[:, 0]
z_high = ml.layer_basic(out_put_x_high, 1)[:, 0]
z_low = ml.layer_basic(out_put_x_low, 1)[:, 0]
z_close = ml.layer_basic(
    tf.nn.elu(ml.layer_basic(out_put_x_close, 4)), 1)[:, 0]

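# z_ carries four target columns; the variable names suggest open/high/low/close prices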
z_open_, z_high_, z_low_, z_close_ = z_[:, 0], z_[:, 1], z_[:, 2], z_[:, 3]

loss_open = tf.reduce_mean((z_open - z_open_)**2)
loss_high = tf.reduce_mean((z_high - z_high_)**2)
loss_low = tf.reduce_mean((z_low - z_low_)**2)
loss_close = tf.reduce_mean((z_close - z_close_)**2)

# loss = (loss_open + loss_high + loss_low + loss_close) / 4
loss = loss_close
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
Example #15
embeddings = tf.constant(embeddings)
embed = tf.nn.embedding_lookup(embeddings, x)
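# note: the embedding table above is a tf.constant, so it is looked up but never updated during training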

gru = tf.nn.rnn_cell.GRUCell(num_units=16,
                             reuse=tf.AUTO_REUSE,
                             activation=tf.nn.elu)
state = gru.zero_state(batch_size, dtype=tf.float32)
lis = []
with tf.variable_scope('RNN'):
    for timestep in range(word_size):
        if timestep == 1:
            tf.get_variable_scope().reuse_variables()
        (cell_output, state) = gru(embed[:, timestep], state)
    out_put = state

lay1 = tf.nn.elu(ml.layer_basic(out_put, 4))
lay2 = ml.layer_basic(ml.bn_with_wb(lay1), 1)
y = tf.nn.sigmoid(lay2[:, 0])
loss = tf.reduce_sum(-y_ * tf.log(y + 0.000000001) - (1 - y_) *
                     tf.log(1 - y + 0.00000001)) / batch_size / tf.log(2.0)
optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)
# ...................................................................
sess = tf.Session()
sess.run(tf.global_variables_initializer())

print('begin..................................', sum(y_test) / len(y_test))

for i in range(10**10):
    x_train, y_train = next()
    sess.run(optimizer, feed_dict={x: x_train, y_: y_train})
Example #16

def next():
    a = train_data.sample(batch_size)
    return np.array(a.drop(vec_size, axis=1)), np.array(a[vec_size])


x_test, y_test = np.array(test_data.drop(vec_size, axis=1)), np.array(
    test_data[vec_size])

print(sum(y_test) / len(y_test))

x = tf.placeholder(shape=[batch_size, vec_size], dtype=tf.float32)
y_ = tf.placeholder(shape=[batch_size], dtype=tf.float32)

lay1 = ml.layer_basic(ml.bn(x), size=1)[:, 0]
y = tf.nn.sigmoid(lay1)
#loss = tf.reduce_sum(-y_ * tf.log(y + 0.000000001) - (1 - y_) * tf.log(1 - y + 0.00000001)) / batch_size / tf.log(2.0)
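# the objective below maximizes |y + y_ - 1|: it pushes y toward 1 when y_ = 1 and toward 0 when y_ = 0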
loss = -tf.reduce_sum(tf.abs(y + y_ - 1))

optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)

sess = tf.Session()

# ...................................................................
sess.run(tf.global_variables_initializer())

for i in range(10**10):
    x_train, y_train = next()
    sess.run(optimizer, feed_dict={x: x_train, y_: y_train})
Example #17
X = x - x00

XX = X / (X + 0.0001)

c1 = ml.conv2d(tf.expand_dims(X, axis=-1), conv_filter=[5, 5, 1, 32], ksize=[1, 3, 4, 1], pool_stride=[1, 3, 4, 1],
               nn=tf.nn.relu)  # [20,40 ]
c2 = ml.conv2d(c1, conv_filter=[3, 4, 32, 64], ksize=[1, 2, 4, 1], pool_stride=[1, 2, 4, 1], nn=tf.nn.relu)  # [10,10]
c3 = ml.conv2d(c2, conv_filter=[3, 4, 64, 128], ksize=[1, 2, 2, 1], pool_stride=[1, 2, 2, 1], nn=tf.nn.relu)  # [5,5]

w = tf.Variable(tf.random_uniform([5, 5, 128, 256], -1.0, 1.0))
#b = tf.Variable(tf.random_uniform([256], -1.0, 1.0))
c4 = tf.nn.conv2d(c3, filter=w, strides=[1, 1, 1, 1], padding='VALID')

out = tf.nn.relu(
    tf.layers.batch_normalization(tf.reshape(c4, shape=[batch_size, 256]),
                                  training=training))

y = ml.layer_basic(out, 10 * 4)

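# split the 40 outputs into four 10-way softmax heads, one per character position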
y0, y1, y2, y3 = tf.nn.softmax(y[:, 0:10]), tf.nn.softmax(y[:, 10:20]), tf.nn.softmax(y[:, 20:30]), tf.nn.softmax(
    y[:, 30:40])
Y = tf.concat([y0, y1, y2, y3], axis=1)

Y_ = tf.reshape(tf.one_hot(y_, depth=10), shape=[batch_size, 10 * 4])
loss = -tf.reduce_sum(Y_ * tf.log(Y + 0.001)) / batch_size / tf.log(2.0)
# optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)

y_out = tf.argmax(tf.reshape(Y, shape=[batch_size, 4, 10]), axis=2)

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)
Example #18
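# '公司状态' ('company status') is the label column of the DataFrame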
def next():
    a = train_data.sample(batch_size)
    return np.array(a.drop('公司状态', axis=1)), np.array(a['公司状态'])


x_test, y_test = np.array(test_data.drop('公司状态', axis=1)), np.array(test_data['公司状态'])

x = tf.placeholder(shape=[batch_size, 12], dtype=tf.float32)
y_ = tf.placeholder(shape=[batch_size], dtype=tf.float32)

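# batch-normalize the 12 input features, then stack five residual blocks (ml.res)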
lis = [ml.bn(x)]
for i in range(5):
    lis.append(ml.res(lis[-1]))

lay1 = tf.nn.elu(ml.layer_basic(ml.bn(lis[-1]), size=4))
y = tf.nn.sigmoid(ml.layer_basic(lay1, size=1)[:, 0])

loss = tf.reduce_sum(-y_ * tf.log(y + 0.000000001) - (1 - y_) * tf.log(1 - y + 0.00000001)) / batch_size / tf.log(2.0)

optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)

sess = tf.Session()

# ...................................................................
sess.run(tf.global_variables_initializer())

for i in range(10 ** 10):
    x_train, y_train = next()
    sess.run(optimizer, feed_dict={x: x_train, y_: y_train})
Example #19
               padding='VALID',
               ksize=[1, 100, 10, 1],
               pool_stride=[1, 100, 10, 1],
               pool_padding='VALID')
# c2 = ml.conv2d(c1, conv_filter=[4, 4, 1, 1], padding='VALID', ksize=[1, 20, 5, 1],
#                pool_stride=[1, 10, 2, 1],
#                pool_padding='VALID')
c3 = ml.conv2d(c1,
               conv_filter=[int(c1.shape[1]),
                            int(c1.shape[2]), 1, 1],
               padding='VALID',
               ksize=[1, 1, 1, 1],
               pool_stride=[1, 1, 1, 1],
               pool_padding='VALID')
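# c3's filter spans all of c1's remaining height and width with VALID padding, so its output
# is 1x1 spatially; c3[:, 0, 0] below is a per-example feature vector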

y = tf.nn.sigmoid(ml.layer_basic(c3[:, 0, 0]))[:, 0]
#gv = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
loss = tf.reduce_sum(-y_ * tf.log(y + 0.000000001) - (1 - y_) *
                     tf.log(1 - y + 0.00000001)) / batch_size / tf.log(2.0)
# loss=tf.reduce_mean((y-y_)**2)
#l2_loss = tf.contrib.layers.apply_regularization(tf.contrib.layers.l2_regularizer(0.2, scope=None), weights_list=gv)
#all_loss = loss + l2_loss
optimizer = tf.train.AdamOptimizer(learning_rate=0.05).minimize(loss)
# ...................................................................
sess = tf.Session()
sess.run(tf.global_variables_initializer())

print('begin..................................', sum(y_test) / len(y_test))

for i in range(10**10):
    x_train, y_train = next()
Example #20
        (cell_output_a_x, state_a_x) = gru_a_x(X[:, timestep], state_a_x)
    out_put_a_x = state_a_x

gru_a_y = GRUCell(num_units=8, reuse=tf.AUTO_REUSE, activation=tf.nn.elu)
state_a_y = gru_a_y.zero_state(batch_size, dtype=tf.float16)
with tf.variable_scope('RNN_a_y'):
    for timestep in range(long - 1):  # be careful
        if timestep == 1:
            tf.get_variable_scope().reuse_variables()
        (cell_output_a_y, state_a_y) = gru_a_y(Y[:, timestep], state_a_y)
    out_put_a_y = state_a_y

out_put_a = tf.concat([out_put_a_x, out_put_a_y], axis=1)
#======================================================================================

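# the prediction adds x[:, 0, -1], scaled by a learned sigmoid gate built from out_put_a,
# to a linear readout of out_put (computed above this snippet)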
lay1 = tf.nn.tanh(ml.layer_basic(out_put, 4))
z = ml.layer_basic(lay1, 1)[:, 0] + x[:, 0, -1] * tf.nn.sigmoid(
    ml.layer_basic(tf.nn.tanh(ml.layer_basic(out_put_a, 4)), 1)[:, 0])

loss = tf.reduce_mean((z - z_)**2)

optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
optimizer_min = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(loss)

# ...................................................................
sess = tf.Session(config=tf.ConfigProto(
    # inter_op_parallelism_threads=0,
    intra_op_parallelism_threads=12))
sess.run(tf.global_variables_initializer())
# saver=tf.train.Saver()
# saver.restore(sess,'/usr/local/oybb/project/bphs_model/hk/hs_with_open'+str(otype))
Example #21
embeddings = tf.Variable(
    tf.random_uniform([1000, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, x)

gru = GRUCell(num_units=8, reuse=tf.AUTO_REUSE, activation=tf.nn.elu)
state = gru.zero_state(batch_size, dtype=tf.float32)
lis = []
with tf.variable_scope('RNN'):
    for timestep in range(word_size):
        if timestep == 1:
            tf.get_variable_scope().reuse_variables()
        (cell_output, state) = gru(ml.bn_with_wb(embed[:, timestep]), state)
    out_put = state

lay1 = tf.nn.elu(ml.layer_basic(out_put, 4))
lay2 = ml.layer_basic(out_put, 1)
y = tf.nn.sigmoid(lay2[:, 0])
loss = tf.reduce_sum(-y_ * tf.log(y + 0.000000001) - (1 - y_) * tf.log(1 - y + 0.00000001)) / batch_size / tf.log(2.0)
optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)

# ...................................................................
sess = tf.Session()
sess.run(tf.global_variables_initializer())

print('begin..................................', sum(y_test) / len(y_test))

for i in range(10 ** 10):
    x_train, y_train = next()
    sess.run(optimizer, feed_dict={x: x_train, y_: y_train})
Example #22
c1 = ml.conv2d(X,
               conv_filter=[4, 1, 1, 2],
               ksize=[1, 11, 1, 1],
               pool_stride=[1, 10, 1, 1])
c2 = ml.conv2d(c1,
               conv_filter=[4, 1, 2, 4],
               ksize=[1, 30, 1, 1],
               pool_stride=[1, 20, 1, 1])
c3 = ml.conv2d(c2,
               conv_filter=[5, 1, 4, 8],
               padding='VALID',
               ksize=[1, 1, 1, 1],
               pool_stride=[1, 1, 1, 1])

c_out = tf.reshape(c3, [batch_size, 8])
lay2 = ml.layer_basic(c_out, 1)

y = tf.nn.sigmoid(lay2[:, 0])
loss = tf.reduce_sum(-y_ * tf.log(y + 0.000000001) - (1 - y_) *
                     tf.log(1 - y + 0.00000001)) / batch_size / tf.log(2.0)
gv = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
# loss=tf.reduce_mean((y-y_)**2)
l2_loss = tf.contrib.layers.apply_regularization(
    tf.contrib.layers.l2_regularizer(0.7, scope=None), weights_list=gv)
all_loss = loss + l2_loss
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
# ...................................................................
sess = tf.Session()
sess.run(tf.global_variables_initializer())

print('begin..................................')
Example #23
               ksize=[1, 4, 5, 1],
               pool_stride=[1, 2, 4, 1],
               bn_training=training)  # [10,10]
c3 = ml.conv2d(c2,
               conv_filter=[3, 4, 16, 32],
               ksize=[1, 3, 3, 1],
               pool_stride=[1, 2, 2, 1],
               bn_training=training)  # [5,5]

w = tf.Variable(tf.random_uniform([5, 5, 32, 128], -1.0, 1.0))
b = tf.Variable(tf.random_uniform([128], -1.0, 1.0))
c4 = tf.nn.conv2d(c3, filter=w, strides=[1, 1, 1, 1], padding='VALID')

out = tf.reshape(c4, shape=[batch_size, 128])

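# four independent 36-way softmax heads read the same 128-dimensional feature vector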
y0 = tf.nn.softmax(ml.layer_basic(out, 36))
y1 = tf.nn.softmax(ml.layer_basic(out, 36))
y2 = tf.nn.softmax(ml.layer_basic(out, 36))
y3 = tf.nn.softmax(ml.layer_basic(out, 36))

loss0 = -tf.reduce_mean(tf.one_hot(y_[:, 0], depth=36) *
                        tf.log(y0 + 0.0001)) / batch_size / tf.log(2.0)
loss1 = -tf.reduce_mean(tf.one_hot(y_[:, 1], depth=36) *
                        tf.log(y1 + 0.0001)) / batch_size / tf.log(2.0)
loss2 = -tf.reduce_mean(tf.one_hot(y_[:, 2], depth=36) *
                        tf.log(y2 + 0.0001)) / batch_size / tf.log(2.0)
loss3 = -tf.reduce_mean(tf.one_hot(y_[:, 3], depth=36) *
                        tf.log(y3 + 0.0001)) / batch_size / tf.log(2.0)

loss = loss0 + loss1 + loss2 + loss3
Example #24
X = tf.layers.batch_normalization(x1, training=True, scale=False, center=False, axis=[0, -1])
# X=x1
gru = GRUCell(num_units=4, reuse=tf.AUTO_REUSE, activation=tf.nn.elu, kernel_initializer=tf.glorot_normal_initializer(),
              dtype=tf.float16)
state = gru.zero_state(batch_size, dtype=tf.float16)
with tf.variable_scope('RNN'):
    for timestep in range(long):
        if timestep == 1:
            tf.get_variable_scope().reuse_variables()
        (cell_output, state) = gru(X[:, timestep], state)
    out_put = state

out = tf.nn.relu(out_put)

y = ml.layer_basic(out, 1)[:, 0]

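# the squared error is cast to float16, presumably to stay in half precision like the GRU state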
loss = tf.cast(tf.reduce_mean((y - y_) * (y - y_)), dtype=tf.float16)
# optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)
# optimizer_min = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(loss)

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    optimizer = tf.train.AdamOptimizer(learning_rate=0.0005).minimize(loss)

# ...................................................................
# gpu_options = tf.GPUOptions(allow_growth=True)
# sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess = tf.Session()
sess.run(tf.global_variables_initializer())
Example #25
               pool_stride=[1, 2, 4, 1],
               nn=tf.nn.relu)  # [10,10]
c3 = ml.conv2d(c2,
               conv_filter=[3, 4, 64, 128],
               ksize=[1, 2, 2, 1],
               pool_stride=[1, 2, 2, 1],
               nn=tf.nn.relu)  # [5,5]

w = tf.Variable(tf.random_uniform([5, 5, 128, 256], -1.0, 1.0))
#b = tf.Variable(tf.random_uniform([512], -1.0, 1.0))
c4 = tf.nn.conv2d(c3, filter=w, strides=[1, 1, 1, 1], padding='VALID')

out = tf.nn.relu(
    tf.layers.batch_normalization(tf.reshape(c4, shape=[batch_size, 256]),
                                  training=training))
y = tf.nn.softmax(ml.layer_basic(out, 36 * 4))
Y_ = tf.reshape(tf.one_hot(y_, depth=36), shape=[batch_size, 36 * 4])
loss = -tf.reduce_sum(Y_ * tf.log(y + 0.001)) / batch_size / tf.log(2.0)
# optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)

y_out = tf.argmax(tf.reshape(y, shape=[batch_size, 4, 36]), axis=2)

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)

sess = tf.Session()
saver = tf.train.Saver()
saver.restore(sess, '/home/liangoy/project/captcha_cnn/model/cnn_4c/cnn_4c')
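# weights come from the restored checkpoint; no global_variables_initializer is run here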
for i in range(10**10):
    train_y, train_x = generate_image(batch_size)