Example #1
import time

import tensorflow as tf

# assumes the project-local `datafeeds` module is already imported


def read_tfrecords_pointwise(config):
    """
    Read TFRecords through the pointwise data feed and time a full pass.
    """
    datafeed = datafeeds.TFPointwisePaddingData(config)
    input_l, input_r, label_y = datafeed.ops()
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    start_time = time.time()
    sess = tf.InteractiveSession()
    sess.run(init_op)
    # start the input queue runner threads
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    step = 0
    while not coord.should_stop():
        step += 1
        try:
            left_, right_, label_ = sess.run([input_l, input_r, label_y])
            print("pointwise data read is good")
        except tf.errors.OutOfRangeError:
            # input exhausted: stop the reader threads
            print("read %d steps" % step)
            coord.request_stop()
    coord.join(threads)
    duration = time.time() - start_time
    print("duration: %ds, step: %d" % (duration, step))
    sess.close()
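The helper above only checks that the reader pipeline works. A minimal invocation might look like the sketch below; the key names are borrowed from the train/predict examples further down, but the exact set expected by datafeeds.TFPointwisePaddingData depends on the project's config loader, so treat it as an assumption.

# Minimal sketch, assuming TFPointwisePaddingData accepts a flat dict with
# these keys (the path is a placeholder).
config = {
    "train_file": "data/train.tfrecord",
    "batch_size": "32",
    "num_epochs": "1",
    "shuffle": "0",
}
read_tfrecords_pointwise(config)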
Example #2
def train(conf_dict):
    """
    Train the matching network in pointwise or pairwise mode.
    """
    training_mode = conf_dict["training_mode"]
    net = utility.import_object(
        conf_dict["net_py"], conf_dict["net_class"])(conf_dict)
    if training_mode == "pointwise":
        # pointwise: predict a label for each (left, right) pair
        datafeed = datafeeds.TFPointwisePaddingData(conf_dict)
        input_l, input_r, label_y = datafeed.ops()
        pred = net.predict(input_l, input_r)
        output_prob = tf.nn.softmax(pred, -1, name="output_prob")
        loss_layer = utility.import_object(
            conf_dict["loss_py"], conf_dict["loss_class"])()
        loss = loss_layer.ops(pred, label_y)
    elif training_mode == "pairwise":
        # pairwise: score a positive and a negative candidate against the query
        datafeed = datafeeds.TFPairwisePaddingData(conf_dict)
        input_l, input_r, neg_input = datafeed.ops()
        pos_score = net.predict(input_l, input_r)
        output_prob = tf.identity(pos_score, name="output_prob")
        neg_score = net.predict(input_l, neg_input)
        loss_layer = utility.import_object(
            conf_dict["loss_py"], conf_dict["loss_class"])(conf_dict)
        loss = loss_layer.ops(pos_score, neg_score)
    else:
        print("training mode not supported", file=sys.stderr)
        sys.exit(1)
    # define optimizer
    lr = float(conf_dict["learning_rate"])
    optimizer = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss)

    # run_trainer
    controler.run_trainer(loss, optimizer, conf_dict)
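train() expects the configuration as a flat dict. A minimal pointwise configuration, assembled from the keys actually read in these examples, could look like the sketch below; the module/class paths and file names are placeholders, and the real config may require additional fields.

# Minimal pointwise configuration sketch; every key below is read somewhere in
# these examples, but the values shown are hypothetical.
conf_dict = {
    "training_mode": "pointwise",
    "net_py": "nets.bow",              # placeholder module path
    "net_class": "BOW",                # placeholder class name
    "loss_py": "losses.softmax_loss",  # placeholder module path
    "loss_class": "SoftmaxWithLoss",   # placeholder class name
    "train_file": "data/train.tfrecord",
    "test_file": "data/test.tfrecord",
    "batch_size": "64",
    "num_epochs": "10",
    "shuffle": "1",
    "learning_rate": "0.001",
}
train(conf_dict)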
Example #3
def predict(conf_dict):
    """
    Run prediction on the test file with the pointwise data feed.
    """
    net = utility.import_object(
        conf_dict["net_py"], conf_dict["net_class"])(conf_dict)
    # reuse the training reader for the test set: single epoch, batch size 1,
    # no shuffling, and the test file fed in place of the train file
    conf_dict.update({"num_epochs": "1", "batch_size": "1",
                      "shuffle": "0", "train_file": conf_dict["test_file"]})
    test_datafeed = datafeeds.TFPointwisePaddingData(conf_dict)
    test_l, test_r, test_y = test_datafeed.ops()
    # test network
    pred = net.predict(test_l, test_r)
    controler.run_predict(pred, test_y, conf_dict)
Example #4
def predict(conf_dict):
    """
    Run prediction on the test file (tf.compat.v1 variant).
    """
    tf.compat.v1.reset_default_graph()
    net = utility.import_object(conf_dict["net_py"],
                                conf_dict["net_class"])(conf_dict)
    # feed the test file through the training reader: batch size 1, no shuffling
    conf_dict.update({
        "batch_size": "1",
        "shuffle": "0",
        "train_file": conf_dict["test_file"]
    })
    if "dropout_rate" in conf_dict:
        # override the dropout setting for inference
        conf_dict.update({"dropout_rate": 1.0})
    test_datafeed = datafeeds.TFPointwisePaddingData(conf_dict)
    test_l, test_r, test_y = test_datafeed.ops()
    pred = net.predict(test_l, test_r)
    controler.run_predict(pred, test_y, conf_dict)
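After training, prediction can reuse the same configuration: both predict() variants simply repoint "train_file" at "test_file" and force batch size 1 with shuffling off before rebuilding the graph. A small usage sketch, reusing the conf_dict sketched after Example #2: note that predict() mutates the dict in place via conf_dict.update(), so pass a copy if the dict is needed again afterwards.

import copy

# score the test set with the training configuration; deepcopy keeps the
# original conf_dict untouched
predict(copy.deepcopy(conf_dict))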
Example #5
def train(conf_dict):
    """
    Train the matching network in pointwise mode (tf.compat.v1 variant).
    """
    tf.compat.v1.reset_default_graph()
    net = utility.import_object(conf_dict["net_py"],
                                conf_dict["net_class"])(conf_dict)
    datafeed = datafeeds.TFPointwisePaddingData(conf_dict)
    input_l, input_r, label_y = datafeed.ops()
    pred = net.predict(input_l, input_r)
    loss_layer = utility.import_object(conf_dict["loss_py"],
                                       conf_dict["loss_class"])()
    loss = loss_layer.ops(pred, label_y)
    # define optimizer
    lr = float(conf_dict["learning_rate"])
    optimizer = tf.compat.v1.train.AdamOptimizer(
        learning_rate=lr).minimize(loss)
    # run_trainer
    controler.run_trainer(loss, optimizer, conf_dict)