Example #1
def train_save(ds: Dataset, t_labs: np.ndarray, t_data: np.ndarray,
               num_feats: int, num_class: int):
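    # Build the graph: placeholders, variables, dense inference, accuracy, and the train op.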
    x_data, labels = placeholder(num_feats, num_class)
    weight, bias, global_steps = variables(num_feats, num_class)
    logits, predict = inferDense(x_data, weight, bias)
    acc = calAcc(labels, predict)
    train_op, loss = loss_train(labels, logits, global_steps)
    tf.summary.scalar('acc', acc)
    tf.summary.scalar('loss', loss)
    init = tf.global_variables_initializer()

    merged = tf.summary.merge_all()
    with tf.Session() as sess:
        sess.run(init)
        writer = tf.summary.FileWriter('logs', sess.graph)

        for y_batch, x_batch in ds:
            _, gs = sess.run(fetches=[train_op, global_steps],
                             feed_dict={
                                 x_data: x_batch,
                                 labels: y_batch
                             })

            if gs % 200 == 0:
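                # Every 200 steps, evaluate on the held-out set and log the merged summary.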
                acc_val, summary = sess.run(fetches=[acc, merged],
                                            feed_dict={
                                                x_data: t_data,
                                                labels: t_labs
                                            })
                writer.add_summary(summary, gs)

                print(acc_val)
        writer.close()
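
The helpers used above (placeholder, variables, inferDense, calAcc, loss_train) are not shown in these examples. Below is a minimal sketch of what they might look like for a plain softmax classifier; the learning rate, zero initializers, and tensor names are assumptions, not the original code:

import tensorflow as tf


def placeholder(num_feats, num_class):
    # Dense input batch and one-hot labels (assumed shapes).
    x_data = tf.placeholder(tf.float32, shape=[None, num_feats], name='x_data')
    labels = tf.placeholder(tf.float32, shape=[None, num_class], name='labels')
    return x_data, labels


def variables(num_feats, num_class):
    # Weight matrix, bias vector, and a non-trainable step counter.
    weight = tf.Variable(tf.zeros([num_feats, num_class]), name='weight')
    bias = tf.Variable(tf.zeros([num_class]), name='bias')
    global_steps = tf.Variable(0, trainable=False, name='global_steps')
    return weight, bias, global_steps


def inferDense(x_data, weight, bias):
    # Plain softmax regression: logits plus the predicted class per row.
    logits = tf.matmul(x_data, weight) + bias
    predict = tf.argmax(tf.nn.softmax(logits), axis=1)
    return logits, predict


def calAcc(labels, predict):
    # Fraction of rows whose predicted class matches the one-hot label.
    correct = tf.equal(predict, tf.argmax(labels, axis=1))
    return tf.reduce_mean(tf.cast(correct, tf.float32))


def loss_train(labels, logits, global_steps):
    # Cross-entropy loss and a gradient-descent step that bumps global_steps.
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=logits))
    train_op = tf.train.GradientDescentOptimizer(0.01).minimize(
        loss, global_step=global_steps)
    return train_op, loss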
Example #2
def train_supervisor(ds: Dataset, t_labs: np.ndarray, t_data: np.ndarray,
                     num_feats: int, num_class: int):
    x_data, labels = placeholder(num_feats, num_class)
    weight, bias, global_steps = variables(num_feats, num_class)
    logits, predict = inferDense(x_data, weight, bias)
    acc = calAcc(labels, predict)
    train_op, loss = loss_train(labels, logits, global_steps)
    tf.summary.scalar('acc', acc)
    tf.summary.scalar('loss', loss)
    init = tf.global_variables_initializer()

    merged = tf.summary.merge_all()
    sv = tf.train.Supervisor(logdir="supervise/", init_op=init, summary_op=merged)
    with sv.managed_session() as sess:
        # managed_session() already runs init_op, so no explicit sess.run(init) is needed.
        while not sv.should_stop():
            try:
                y_batch, x_batch = next(ds)
                _, gs = sess.run(fetches=[train_op, global_steps], feed_dict={
                    x_data: x_batch, labels: y_batch
                })
            except StopIteration:
                # Input exhausted: ask the supervisor to stop; managed_session
                # stops and cleans up the supervisor when the with block exits.
                sv.request_stop()
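
tf.train.Supervisor is deprecated in TensorFlow 1.x in favor of tf.train.MonitoredTrainingSession, which likewise handles initialization, checkpointing, and summary writing. A minimal sketch of the same loop on that API (the checkpoint directory is carried over from the example above):

with tf.train.MonitoredTrainingSession(checkpoint_dir='supervise/') as sess:
    while not sess.should_stop():
        try:
            y_batch, x_batch = next(ds)
            sess.run(train_op, feed_dict={x_data: x_batch, labels: y_batch})
        except StopIteration:
            break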
Example #3
def train_save_model(ds: Dataset, t_labs: np.ndarray, t_data: np.ndarray,
                     num_feats: int, num_class: int):
    x_data, labels = placeholder(num_feats, num_class)
    weight, bias, global_steps = variables(num_feats, num_class)
    logits, predict = inferDense(x_data, weight, bias)
    acc = calAcc(labels, predict)
    train_op, _ = loss_train(labels, logits, global_steps)

    init = tf.global_variables_initializer()

    saved_model_dir = 'saved_model/softmax'
    with tf.Session() as sess:
        sess.run(init)
        writer = tf.summary.FileWriter('logs', sess.graph)

        for y_batch, x_batch in ds:
            _, gs = sess.run(fetches=[train_op, global_steps],
                             feed_dict={
                                 x_data: x_batch,
                                 labels: y_batch
                             })

            if gs % 200 == 0:
                # Fetch the accuracy tensor directly so print shows a scalar,
                # not a one-element list.
                acc_val = sess.run(fetches=acc,
                                   feed_dict={
                                       x_data: t_data,
                                       labels: t_labs
                                   })

                print(acc_val)
        save_model(saved_model_dir, sess, x_data, labels, predict, acc)
        writer.close()
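
save_model is another helper that is not shown. One way to implement it in TensorFlow 1.x is tf.saved_model.simple_save, which writes a servable SavedModel with named input and output tensors; the signature keys below are assumptions:

def save_model(export_dir, sess, x_data, labels, predict, acc):
    # Writes the graph and variables under export_dir in the SavedModel format.
    # Note: simple_save requires that export_dir does not exist yet.
    tf.saved_model.simple_save(sess, export_dir,
                               inputs={'x_data': x_data, 'labels': labels},
                               outputs={'predict': predict, 'acc': acc})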
Example #4
def restore2(t_labs: np.ndarray, t_data: np.ndarray, num_feats=180, num_class=3):
    x_data, labels = placeholder(num_feats, num_class)
    weight, bias, global_steps = variables(num_feats, num_class)
    logits, pred, acc = infer(x_data, labels, weight, bias)

    with tf.Session() as sess:
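        # Restore the trained variables from the checkpoint prefix.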
        saver = tf.train.Saver()
        saver.restore(sess, save_path='models/softmax')

        predict, accuracy = sess.run([pred, acc], feed_dict={x_data: t_data, labels: t_labs})

        print(accuracy)
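
infer is not shown either; judging by its use here, it presumably combines inferDense and calAcc from the earlier examples. A sketch under that assumption:

def infer(x_data, labels, weight, bias):
    # Logits, per-row predicted class, and accuracy in one call.
    logits, pred = inferDense(x_data, weight, bias)
    acc = calAcc(labels, pred)
    return logits, pred, acc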
Example #5
        return iterator, iterator.initializer
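
Only the tail of LibsvmHelper.ds_iterator survives above. A plausible reconstruction with the tf.data API, assuming each TFRecord carries a scalar int64 label plus variable-length index/value features (the feature names and parsing details are assumptions):

class LibsvmHelper:
    def __init__(self, num_feats, num_class, num_epochs, batch_size, num_threads):
        self.num_feats = num_feats
        self.num_class = num_class
        self.num_epochs = num_epochs
        self.batch_size = batch_size
        self.num_threads = num_threads

    def parse(self, record):
        # One Example per libsvm row: scalar label, sparse feature indices/values.
        features = tf.parse_single_example(record, features={
            'label': tf.FixedLenFeature([], tf.int64),
            'index': tf.VarLenFeature(tf.int64),
            'value': tf.VarLenFeature(tf.float32),
        })
        return features['label'], features['index'], features['value']

    def ds_iterator(self, tf_files):
        # Parse, repeat for num_epochs, and batch; the iterator must be
        # initialized before use, hence the explicit initializer returned below.
        ds = (tf.data.TFRecordDataset(tf_files)
              .map(self.parse, num_parallel_calls=self.num_threads)
              .repeat(self.num_epochs)
              .batch(self.batch_size))
        iterator = ds.make_initializable_iterator()
        return iterator, iterator.initializer

The sparse inference path (inferSparse) is also undefined. With libsvm-style index/value pairs, tf.nn.embedding_lookup_sparse can compute the same logits as a dense matmul; a sketch, again an assumption rather than the original code:

def inferSparse(index, value, weight, bias):
    # Sum the weight rows selected by the sparse indices, scaled by their
    # values: equivalent to x @ weight for a sparse input row x.
    logits = tf.nn.embedding_lookup_sparse(weight, sp_ids=index,
                                           sp_weights=value,
                                           combiner='sum') + bias
    predict = tf.argmax(tf.nn.softmax(logits), axis=1)
    return logits, predict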


if __name__ == '__main__':
    # trans_libsvm('data/dna.scale.t', 'data/dna.scale.tf.t')
    # trans_libsvm('data/dna.scale.tr', 'data/dna.scale.tf.tr')

    tf_files = ['data/dna.scale.tf.t', 'data/dna.scale.tf.tr']
    num_feats, num_class, num_epochs, batch_size, num_threads = 180, 3, 1000, 128, 2
    libsvmHelper = LibsvmHelper(num_feats, num_class, num_epochs, batch_size,
                                num_threads)
    # label, index, value = libsvmHelper.pipeline(tf_files, tf.get_default_graph())
    iterator, initializer = libsvmHelper.ds_iterator(tf_files)
    label, index, value = iterator.get_next()

    weight, bias, global_steps = variables(num_feats, num_class)
    logits, pred = inferSparse(index, value, weight, bias)
    train_op, loss = loss_train(tf.one_hot(label, num_class, 1, 0), logits,
                                global_steps)

    # One op that runs the global-variable, local-variable, and iterator initializers.
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer(), initializer)

    with tf.Session() as sess:
        sess.run(init_op)
        writer = tf.summary.FileWriter(logdir='logs', graph=sess.graph)
        while True:
            try:
                _, loss_, gs = sess.run([train_op, loss, global_steps])
                if gs % 100 == 0: