Example #1
def __init__(self):
    self.w2v_wr = data_helpers.w2v_wrapper(FLAGS.w2v_file)  # load the pre-trained word vectors
    self.init_model()
    # Map a predicted class index to its refuse-category label
    self.refuse_classification_map = {
        0: '可回收垃圾',  # recyclable waste
        1: '有害垃圾',    # hazardous waste
        2: '湿垃圾',      # wet (household food) waste
        3: '干垃圾'       # dry (residual) waste
    }
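
The map above turns a predicted class index (0-3) into a human-readable refuse category. A minimal usage sketch, where `classify` and `predict_index` are hypothetical names standing in for the model's forward pass (only the label lookup is grounded in the snippet above):

def classify(self, text):
    # `predict_index` is a hypothetical helper that runs the loaded model
    # and returns a class index in 0-3.
    class_index = self.predict_index(text)
    return self.refuse_classification_map[class_index]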
Example #2
            # Generate shuffled training batches for all epochs
            # (x_train / y_train are assumed names; the snippet begins mid-call)
            batches = data_helpers.batch_iter(list(zip(x_train, y_train)),
                                              FLAGS.batch_size,
                                              FLAGS.num_epochs)

            def dev_test():
                batches_dev = data_helpers.batch_iter(list(zip(x_dev, y_dev)),
                                                      FLAGS.batch_size, 1)
                for batch_dev in batches_dev:
                    x_batch_dev, y_batch_dev = zip(*batch_dev)
                    dev_step(x_batch_dev,
                             y_batch_dev,
                             writer=dev_summary_writer)

            # Training loop. For each batch...
            for batch in batches:
                x_batch, y_batch = zip(*batch)
                train_step(x_batch, y_batch)
                current_step = tf.train.global_step(sess, global_step)
                # Evaluate on the dev set every evaluate_every steps
                if current_step % FLAGS.evaluate_every == 0:
                    print("\nEvaluation:")
                    dev_test()

                if current_step % FLAGS.checkpoint_every == 0:
                    path = saver.save(sess,
                                      checkpoint_prefix,
                                      global_step=current_step)
                    print("Saved model checkpoint to {}\n".format(path))


if __name__ == "__main__":
    w2v_wr = data_helpers.w2v_wrapper(FLAGS.w2v_file)
    train(w2v_wr.model)
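
All of these examples lean on `data_helpers.batch_iter`, which is never shown. A minimal sketch of the conventional implementation in TensorFlow 1.x text-classification projects, offered as an assumption about what the helper does (reshuffle once per epoch, then yield fixed-size slices):

import numpy as np

def batch_iter(data, batch_size, num_epochs, shuffle=True):
    # Yield mini-batches over `data` for `num_epochs` epochs,
    # reshuffling the examples at the start of each epoch.
    data = np.array(data, dtype=object)  # object dtype: rows are (x, y) pairs
    data_size = len(data)
    num_batches_per_epoch = int((data_size - 1) / batch_size) + 1
    for epoch in range(num_epochs):
        shuffled = data[np.random.permutation(data_size)] if shuffle else data
        for batch_num in range(num_batches_per_epoch):
            start = batch_num * batch_size
            end = min(start + batch_size, data_size)
            yield shuffled[start:end]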
Example #3

            def dev_test():
                # Evaluate on the full dev set in one pass instead of batching:
                # batches_dev = data_helpers.batch_iter(list(zip(x_dev, y_dev, entity_dev)), FLAGS.batch_size, 1)
                # for batch_dev in batches_dev:
                #     x_batch_dev, y_batch_dev, entity_batch_dev = zip(*batch_dev)
                accuracy = dev_step(x_dev, y_dev, writer=dev_summary_writer)
                return accuracy
            # Training loop. For each batch...
            for batch in batches:
                x_batch, y_batch = zip(*batch)
                train_step(x_batch, y_batch)
                current_step = tf.train.global_step(sess, global_step)
                # Evaluate on the dev set every evaluate_every steps
                if current_step % FLAGS.evaluate_every == 0:
                    print("\nEvaluation:")
                    accuracy = dev_test()
                    if accuracy > best_accuracy:
                        best_accuracy = accuracy
                        print("best_accuracy {}\n".format(best_accuracy))
                        path = saver.save(sess, checkpoint_prefix, global_step=current_step)
                        # Split the Windows-style checkpoint path into its components
                        path_pass = str(path).split('\\')
                        print(path_pass)
                        print("Saved model checkpoint to {}\n".format(path))
    return x_dev, y_dev, path_pass

if __name__ == "__main__":
    w2v_wr = data_helpers.w2v_wrapper(FLAGS.pre_emb_file)
    x_dev, y_dev, path = train(w2v_wr.model)
    eval(x_dev, y_dev, path)  # project-defined evaluation function (shadows the builtin)
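
Example #3 only works if `dev_step` returns the evaluated accuracy (the version implied by Example #2 returns nothing). A sketch of such a step, assuming a model object `cnn` with `input_x`, `input_y`, `dropout_keep_prob`, `loss`, and `accuracy` tensors — all of these names are assumptions based on common TF 1.x CNN training scripts, not code from the examples above:

def dev_step(x_batch, y_batch, writer=None):
    # Run one evaluation pass over the dev data and report accuracy.
    feed_dict = {
        cnn.input_x: x_batch,
        cnn.input_y: y_batch,
        cnn.dropout_keep_prob: 1.0  # disable dropout for evaluation
    }
    step, summaries, loss, accuracy = sess.run(
        [global_step, dev_summary_op, cnn.loss, cnn.accuracy], feed_dict)
    print("dev: step {}, loss {:g}, acc {:g}".format(step, loss, accuracy))
    if writer:
        writer.add_summary(summaries, step)
    return accuracy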