Example #1
import tensorflow as tf  # TF 1.x API; shared by every example on this page
import data_utility      # project-local module with the FIGER batching helpers


def w2v_type_fully():
    # Two dense layers over a 600-d input (two concatenated 300-d word2vec
    # embeddings; Example #2 slices the same input into its two halves),
    # followed by a single sigmoid output unit.
    x = tf.placeholder(tf.float32, [None, 600], name='x')
    dense_1 = tf.layers.dense(inputs=x, units=1000, activation=tf.nn.relu, name='dense_1')
    dense_2 = tf.layers.dense(inputs=dense_1, units=150, activation=tf.nn.tanh, name='dense_2')
    W = tf.Variable(tf.zeros([150, 1]), name='W')
    b = tf.Variable(tf.zeros([1]), name='b')
    y = tf.sigmoid(tf.matmul(dense_2, W) + b, name='y')
    y_ = tf.placeholder(tf.float32, [None, 1], name='y_')

    # Squared error on the sigmoid output; later examples switch to a
    # weighted sigmoid cross-entropy on the logit.
    loss = tf.reduce_mean((y - y_) * (y - y_), name='loss')
    train_step = tf.train.AdamOptimizer(0.003).minimize(loss, name='train_step')
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()

    figer = data_utility.figer_data()
    test_xs, test_ys = figer.get_test_data(0, 10000)

    saver = tf.train.Saver()
    for i in range(0, 10000):
        batch_xs, batch_ys = figer.next_batch()
        _, print_loss, print_y = sess.run([train_step, loss, y],
                                          feed_dict={x: batch_xs, y_: batch_ys})

        # zero-R: accuracy of a baseline that always predicts 0;
        # NN accuracy: the sigmoid output thresholded at 0.5.
        zero_R_count = 0.0
        NN_count = 0.0
        for j in range(0, len(print_y)):
            if batch_ys[j][0] == 0.0:
                zero_R_count += 1.0
            if abs(print_y[j] - batch_ys[j][0]) < 0.5:
                NN_count += 1.0

        zero_R_acc = zero_R_count / float(len(print_y))
        NN_acc = NN_count / float(len(print_y))

        with open('loss_record.txt', 'a') as out_file:
            out_file.write('train: epoch = ' + str(i) + '\t loss = ' + str(print_loss) +
                           '\t zero_R_acc = ' + str(zero_R_acc) + '\t NN_acc = ' + str(NN_acc) + '\n')
        print('train: epoch = ' + str(i) + '\t loss = ' + str(print_loss) +
              '\t zero_R_acc = ' + str(zero_R_acc) + '\t NN_acc = ' + str(NN_acc))

        # Evaluate on the held-out split every 10 steps.
        if i % 10 == 0:
            print_loss, print_y = sess.run([loss, y], feed_dict={x: test_xs, y_: test_ys})
            zero_R_count = 0.0
            NN_count = 0.0
            for j in range(0, len(print_y)):
                if test_ys[j][0] == 0.0:
                    zero_R_count += 1.0
                if abs(print_y[j] - test_ys[j][0]) < 0.5:
                    NN_count += 1.0

            zero_R_acc = zero_R_count / float(len(print_y))
            NN_acc = NN_count / float(len(print_y))

            with open('loss_record.txt', 'a') as out_file:
                out_file.write('test: epoch = ' + str(i) + '\t loss = ' + str(print_loss) +
                               '\t zero_R_acc = ' + str(zero_R_acc) + '\t NN_acc = ' + str(NN_acc) + '\n')

        if i % 100 == 0:
            model_name = 'model/model_' + str(i) + '.ckpt'
            saver.save(sess, model_name)
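
The zero-R / thresholded-accuracy bookkeeping above can also be done without the
Python loop. A minimal vectorized sketch of the same two metrics (NumPy is my
addition; the original computes them element by element):

import numpy as np

def batch_metrics(pred, labels):
    # pred: [N, 1] sigmoid outputs; labels: [N, 1] binary targets.
    pred = np.asarray(pred).reshape(-1)
    labels = np.asarray(labels).reshape(-1)
    zero_R_acc = np.mean(labels == 0.0)            # baseline that always predicts 0
    NN_acc = np.mean(np.abs(pred - labels) < 0.5)  # sigmoid output thresholded at 0.5
    return zero_R_acc, NN_acc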
Example #2
def w2v_type_L1_norm():
    # Same 600-d input, augmented with the L1 norm of each 300-d half.
    x = tf.placeholder(tf.float32, [None, 600], name='x')
    x1 = tf.slice(x, [0, 0], [-1, 300])
    x2 = tf.slice(x, [0, 300], [-1, -1])
    x1_norm = tf.reshape(tf.norm(x1, ord=1, axis=1), [-1, 1])
    x2_norm = tf.reshape(tf.norm(x2, ord=1, axis=1), [-1, 1])
    x_with_norm = tf.concat([x, x1_norm, x2_norm], 1)  # [batch, 602]

    dense_1 = tf.layers.dense(inputs=x_with_norm,
                              units=150,
                              activation=tf.nn.tanh,
                              name='dense_1')

    example_w = tf.placeholder(tf.float32, [None, 1], name='example_w')
    W = tf.Variable(tf.zeros([150, 1]), name='W')
    b = tf.Variable(tf.zeros([1]), name='b')
    y_logit = tf.add(tf.matmul(dense_1, W), b, name='y_logit')
    y = tf.sigmoid(y_logit, name='y')
    y_ = tf.placeholder(tf.float32, [None, 1], name='y_')

    # loss = tf.reduce_mean((y - y_) * (y - y_), name = 'loss')
    loss = tf.reduce_mean(
        tf.losses.sigmoid_cross_entropy(y_, y_logit, example_w))

    train_step = tf.train.GradientDescentOptimizer(0.33).minimize(loss, name='train_step')

    figer = data_utility.figer_data()

    saver = tf.train.Saver()

    config = tf.ConfigProto(device_count={'GPU': 1})
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(0, 40 * figer.total_batch_num):  # i counts batches, ~40 epochs
            batch_xs, batch_ys, batch_ws = figer.next_shuffle_train_batch()
            _, print_loss = sess.run([train_step, loss],
                                     feed_dict={
                                         x: batch_xs,
                                         y_: batch_ys,
                                         example_w: batch_ws
                                     })

            with open('loss_record.txt', 'a') as out_file:
                out_file.write('train: epoch= {}  loss= {}\n'.format(i, print_loss))

            if i % figer.total_batch_num == 10:  # roughly once per epoch, offset by 10 batches
                get_validation_data_info(sess, figer, i, loss, x, y, y_,
                                         example_w)
                get_train_data_info(sess, figer, i, loss, x, y, y_, example_w)
                get_test_data_info(sess, figer, i, loss, x, y, y_, example_w)
                model_name = 'model/model_' + str(i) + '.ckpt'
                saver.save(sess, model_name)
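
None of these examples show data_utility.figer_data itself. The stub below is
hypothetical, inferred purely from how the examples call it, and only documents
the methods and shapes they rely on:

class figer_data:  # hypothetical stub of data_utility.figer_data
    total_batch_num = 0  # training batches per epoch

    def next_batch(self):
        """Returns (xs [B, 600], ys [B, 1])."""

    def next_shuffle_train_batch(self):
        """Returns (xs [B, 600], ys [B, 1], ws [B, 1] per-example weights)."""

    def next_shuffle_train_context_batch(self):
        """Returns (contexts [T, B, 300], lengths [B], type_embs [B, 300],
        ys [B, 1], ws [B, 1]) with time-major contexts."""

    def get_test_data(self, start, end):
        """Returns (xs, ys) for the [start, end) slice of the test set."""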
Example #3
def context_type_RNN():
    # LSTM over variable-length contexts; time-major input [max_time, batch, 300].
    context = tf.placeholder(tf.float32, [None, None, 300], name='context')
    cell = tf.contrib.rnn.BasicLSTMCell(300)
    batch_size = tf.shape(context)[1]  # dim 1 is the batch axis when time_major=True
    initial_state = cell.zero_state(batch_size, tf.float32)
    sequence_length = tf.placeholder(tf.int32, [None], name='sequence_length')
    outputs, _ = tf.nn.dynamic_rnn(cell, context, sequence_length=sequence_length,
                                   initial_state=initial_state, time_major=True,
                                   dtype=tf.float32)
    # select_rnn (project helper, sketched after this example) picks each
    # sequence's output at its last valid time step.
    final_outputs = select_rnn(outputs, sequence_length - 1)

    type_emb = tf.placeholder(tf.float32, [None, 300], name='type_emb')

    x = tf.concat([final_outputs, type_emb], 1, name='x')

    W = tf.Variable(tf.zeros([600, 1]), name='W')
    b = tf.Variable(tf.zeros([1]), name='b')
    y_logit = tf.add(tf.matmul(x, W), b, name='y_logit')
    y = tf.sigmoid(y_logit, name='y')
    y_ = tf.placeholder(tf.float32, [None, 1], name='y_')

    example_w = tf.placeholder(tf.float32, [None, 1], name='example_w')

    loss = tf.reduce_mean(tf.losses.sigmoid_cross_entropy(y_, y_logit, example_w), name='loss')
    # train_step = tf.train.AdamOptimizer(0.003).minimize(loss, name='train_step')
    train_step = tf.train.GradientDescentOptimizer(0.33).minimize(loss, name='train_step')

    figer = data_utility.figer_data()
    saver = tf.train.Saver()

    config = tf.ConfigProto(device_count={'GPU': 1})
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(0, 40 * figer.total_batch_num):  # i counts batches, ~40 epochs
            batch_cs, batch_cs_lengths, batch_ls, batch_ys, batch_ws = \
                figer.next_shuffle_train_context_batch()
            _, print_loss = sess.run([train_step, loss],
                                     feed_dict={context: batch_cs,
                                                sequence_length: batch_cs_lengths,
                                                type_emb: batch_ls,
                                                y_: batch_ys,
                                                example_w: batch_ws})
            with open('loss_record.txt', 'a') as out_file:
                out_file.write('train: epoch= {}  loss= {}\n'.format(i, print_loss))

            if i % figer.total_batch_num == 10:
                get_train_data_info(sess, figer, i, loss, y, context, sequence_length, type_emb, y_, example_w)
                get_validation_data_info(sess, figer, i, loss, y, context, sequence_length, type_emb, y_, example_w)
                get_test_data_info(sess, figer, i, loss, y, context, sequence_length, type_emb, y_, example_w)
                model_name = 'model/model_' + str(i) + '.ckpt'
                saver.save(sess, model_name)
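
select_rnn is a project helper that is not shown on this page. A minimal sketch
of what it presumably does, under the assumption that outputs is time-major
[max_time, batch, hidden] and the second argument holds each sequence's last
valid time index:

def select_rnn(outputs, time_indices):
    # Gather outputs[time_indices[i], i, :] for every batch element i.
    batch_range = tf.range(tf.shape(outputs)[1])
    gather_idx = tf.stack([time_indices, batch_range], axis=1)  # [batch, 2]
    return tf.gather_nd(outputs, gather_idx)                    # [batch, hidden]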
Example #4
def w2v_type_linear():
    # Logistic-regression baseline: a single linear layer over the 600-d input.
    x = tf.placeholder(tf.float32, [None, 600], name='x')
    example_w = tf.placeholder(tf.float32, [None, 1], name='example_w')
    W = tf.Variable(tf.zeros([600, 1]), name='W')
    b = tf.Variable(tf.zeros([1]), name='b')
    y_logit = tf.add(tf.matmul(x, W), b, name='y_logit')
    y = tf.sigmoid(y_logit, name='y')
    y_ = tf.placeholder(tf.float32, [None, 1], name='y_')

    # loss = tf.reduce_mean((y - y_) * (y - y_), name='loss')
    loss = tf.reduce_mean(tf.losses.sigmoid_cross_entropy(y_, y_logit, example_w))

    train_step = tf.train.GradientDescentOptimizer(0.33).minimize(loss, name='train_step')

    figer = data_utility.figer_data()

    saver = tf.train.Saver()

    config = tf.ConfigProto(device_count={'GPU': 0})
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(0, 20 * figer.total_batch_num):  # i counts batches, ~20 epochs
            batch_xs, batch_ys, batch_ws = figer.next_shuffle_train_batch()
            _, print_loss = sess.run([train_step, loss],
                                     feed_dict={x: batch_xs, y_: batch_ys,
                                                example_w: batch_ws})

            with open('loss_record.txt', 'a') as out_file:
                out_file.write('train: epoch= {}  loss= {}\n'.format(i, print_loss))


            if i % 100 == 20:
                get_train_data_info(sess, figer, i, loss, x, y, y_, example_w)
                # assuming get_test_data_info shares the signature used in Example #2:
                get_test_data_info(sess, figer, i, loss, x, y, y_, example_w)

            if i % 1000 == 20:
                model_name = 'model/model_' + str(i) + '.ckpt'
                saver.save(sess, model_name)
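
For orientation, tf.losses.sigmoid_cross_entropy with a weights argument is, up
to its reduction mode, the per-example cross-entropy scaled by example_w. A
sketch of that reading (not code from the original; the names reuse the tensors
above):

per_example = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_, logits=y_logit)
weighted = example_w * per_example
# The default reduction (SUM_BY_NONZERO_WEIGHTS) divides by the number of
# examples with a non-zero weight, so the outer tf.reduce_mean in the loss
# above is a no-op on an already-scalar value.
nonzero = tf.reduce_sum(tf.cast(tf.not_equal(example_w, 0.0), tf.float32))
approx_loss = tf.reduce_sum(weighted) / nonzero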
Example #5
def context_type_CNN():
    # 1-D CNN over a fixed 50-token context window, with position embeddings.
    context = tf.placeholder(tf.float32, [None, 50, 300], name='context')  # [batch, 50, 300-d w2v]
    pos_emb = tf.placeholder(tf.float32, [None, 50, 50], name='pos_emb')   # 50-d position embedding per token
    x = tf.concat([context, pos_emb], 2, name='x')                         # [batch, 50, 350]
    conv1 = tf.layers.conv1d(inputs=x, filters=300, kernel_size=3,
                             padding='same', activation=tf.nn.relu)
    # Pooling over the whole 50-step axis is global max pooling.
    pool1 = tf.layers.max_pooling1d(inputs=conv1, pool_size=50, strides=1, name='pool1')
    pool_last = tf.reshape(pool1, [-1, 300])                               # [batch, 300]
    type_emb = tf.placeholder(tf.float32, [None, 300], name='type_emb')
    concat = tf.concat([pool_last, type_emb], 1, name='concat')
    y_ = tf.placeholder(tf.float32, [None, 1], name='y_')
    W = tf.Variable(tf.zeros([600, 1]), name='W')
    b = tf.Variable(tf.zeros([1]), name='b')
    y_logit = tf.add(tf.matmul(concat, W), b, name='y_logit')
    y = tf.sigmoid(y_logit, name='y')
    example_w = tf.placeholder(tf.float32, [None, 1], name='example_w')
    loss = tf.reduce_mean(tf.losses.sigmoid_cross_entropy(y_, y_logit, example_w), name='loss')
    train_step = tf.train.GradientDescentOptimizer(0.33).minimize(loss, name='train_step')

    figer = data_utility.figer_data()
    saver = tf.train.Saver()

    config = tf.ConfigProto(device_count={'GPU': 1})
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(0, 40 * figer.total_batch_num):  # i counts batches, ~40 epochs
            batch_cs, batch_pos, batch_ls, batch_ys, batch_ws = \
                figer.next_shuffle_train_context_batch_CNN()
            _, print_loss = sess.run([train_step, loss],
                                     feed_dict={context: batch_cs,
                                                pos_emb: batch_pos,
                                                type_emb: batch_ls,
                                                y_: batch_ys,
                                                example_w: batch_ws})
            with open('loss_record.txt', 'a') as out_file:
                out_file.write('train: epoch= {}  loss= {}\n'.format(i, print_loss))
            if i % figer.total_batch_num == 10:
                get_train_data_info_CNN(sess, figer, i, loss, y, context, pos_emb, type_emb, y_, example_w)
                get_validation_data_info_CNN(sess, figer, i, loss, y, context, pos_emb, type_emb, y_, example_w)
                get_test_data_info_CNN(sess, figer, i, loss, y, context, pos_emb, type_emb, y_, example_w)
                model_name = 'model/model_' + str(i) + '.ckpt'
                saver.save(sess, model_name)
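
A shape walk-through of the CNN branch above, with batch size B (comments only,
for orientation):

# x:         [B, 50, 350]  word vectors concatenated with position embeddings
# conv1:     [B, 50, 300]  kernel_size=3 with padding='same' keeps length 50
# pool1:     [B, 1, 300]   pool_size=50 over a length-50 axis = global max pooling
# pool_last: [B, 300]
# concat:    [B, 600]      pooled context features next to the 300-d type embedding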
Example #6
def context_type():
    # Fixed-length (15-token) LSTM variant; time-major input [15, batch, 300].
    context = tf.placeholder(tf.float32, [15, None, 300], name='context')
    cell = tf.contrib.rnn.BasicLSTMCell(300)
    batch_size = tf.shape(context)[1]  # dim 1 is the batch axis when time_major=True
    initial_state = cell.zero_state(batch_size, tf.float32)
    outputs, _ = tf.nn.dynamic_rnn(cell, context, initial_state=initial_state,
                                   time_major=True, dtype=tf.float32)
    # Takes the final time step's output for every example; unlike
    # context_type_RNN() above, this ignores per-sequence lengths.
    final_outputs = outputs[-1]

    type_emb = tf.placeholder(tf.float32, [None, 300], name='type_emb')

    x = tf.concat([final_outputs, type_emb], 1, name='x')

    dense_1 = tf.layers.dense(inputs=x, units=1000, activation=tf.nn.relu, name='dense_1')
    dense_2 = tf.layers.dense(inputs=dense_1, units=150, activation=tf.nn.tanh, name='dense_2')
    W = tf.Variable(tf.zeros([150, 1]), name='W')
    b = tf.Variable(tf.zeros([1]), name='b')
    y = tf.sigmoid(tf.matmul(dense_2, W) + b, name='y')
    y_ = tf.placeholder(tf.float32, [None, 1], name='y_')

    loss = tf.reduce_mean((y - y_) * (y - y_), name='loss')
    # train_step = tf.train.AdamOptimizer(0.003).minimize(loss, name='train_step')
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss, name='train_step')

    config = tf.ConfigProto(device_count={'GPU': 0})
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())

        figer = data_utility.figer_data()
        test_cs, test_ls, test_ys = figer.get_context_test_data(0, 10000)

        saver = tf.train.Saver()
        for i in range(0, 10000):
            batch_cs, batch_ls, batch_ys = figer.next_context_batch()
            _, print_loss, print_y = sess.run([train_step, loss, y],
                                              feed_dict={context: batch_cs,
                                                         type_emb: batch_ls,
                                                         y_: batch_ys})

            # Same zero-R baseline / thresholded-accuracy bookkeeping as Example #1.
            zero_R_count = 0.0
            NN_count = 0.0
            for j in range(0, len(print_y)):
                if batch_ys[j][0] == 0.0:
                    zero_R_count += 1.0
                if abs(print_y[j] - batch_ys[j][0]) < 0.5:
                    NN_count += 1.0

            zero_R_acc = zero_R_count / float(len(print_y))
            NN_acc = NN_count / float(len(print_y))

            with open('loss_record.txt', 'a') as out_file:
                out_file.write('train: epoch = ' + str(i) + '\t loss = ' + str(print_loss) +
                               '\t zero_R_acc = ' + str(zero_R_acc) + '\t NN_acc = ' + str(NN_acc) + '\n')

            if i % 10 == 0:
                print_loss, print_y = sess.run([loss, y],
                                               feed_dict={context: test_cs,
                                                          type_emb: test_ls,
                                                          y_: test_ys})
                zero_R_count = 0.0
                NN_count = 0.0
                for j in range(0, len(print_y)):
                    if test_ys[j][0] == 0.0:
                        zero_R_count += 1.0
                    if abs(print_y[j] - test_ys[j][0]) < 0.5:
                        NN_count += 1.0

                zero_R_acc = zero_R_count / float(len(print_y))
                NN_acc = NN_count / float(len(print_y))
                with open('loss_record.txt', 'a') as out_file:
                    out_file.write('test: epoch = ' + str(i) + '\t loss = ' + str(print_loss) +
                                   '\t zero_R_acc = ' + str(zero_R_acc) + '\t NN_acc = ' + str(NN_acc) + '\n')

            if i % 100 == 0:
                model_name = 'model/model_' + str(i) + '.ckpt'
                saver.save(sess, model_name)
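
Every example checkpoints with tf.train.Saver under model/. A minimal restore
sketch for scoring with a saved graph (assumed usage, not shown in the
original; the step number is an arbitrary example):

tf.reset_default_graph()
saver = tf.train.import_meta_graph('model/model_900.ckpt.meta')
with tf.Session() as sess:
    saver.restore(sess, 'model/model_900.ckpt')
    y = tf.get_default_graph().get_tensor_by_name('y:0')
    # Feed the placeholders by the names given above, e.g. for context_type():
    # sess.run(y, feed_dict={'context:0': cs, 'type_emb:0': ls})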