Code Example #1
    def train_with_feed(step):
        """
        Train on one epoch of data by feeding.
        """
        ave_cost = 0.0
        for it in six.moves.xrange(batch_num // dev_count):
            # Assemble one feed dict per device; each iteration consumes
            # dev_count batches so every device gets its own batch.
            feed_list = []
            for dev in six.moves.xrange(dev_count):
                index = it * dev_count + dev
                batch_data = reader.make_one_batch_input(train_batches, index)
                feed_dict = dict(zip(dam.get_feed_names(), batch_data))
                feed_list.append(feed_dict)

            # Run one data-parallel step and fetch the loss from all devices.
            cost = train_exe.run(feed=feed_list, fetch_list=[loss.name])

            ave_cost += np.array(cost[0]).mean()
            step = step + 1
            if step % print_step == 0:
                print("processed: [" +
                      str(step * dev_count * 1.0 / batch_num) +
                      "] ave loss: [" + str(ave_cost / print_step) + "]")
                ave_cost = 0.0

            # Periodically checkpoint the model and evaluate on the validation set.
            if (args.save_path is not None) and (step % save_step == 0):
                save_path = os.path.join(args.save_path, "step_" + str(step))
                print("Save model at step %d ... " % step)
                print(
                    time.strftime('%Y-%m-%d %H:%M:%S',
                                  time.localtime(time.time())))
                fluid.io.save_persistables(exe, save_path, train_program)

                score_path = os.path.join(args.save_path, 'score.' + str(step))
                test_with_feed(test_exe, test_program, dam.get_feed_names(),
                               [logits.name], score_path, val_batches,
                               val_batch_num, dev_count)

                result_file_path = os.path.join(args.save_path,
                                                'result.' + str(step))
                evaluate(score_path, result_file_path)
        return step, np.array(cost[0]).mean()
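
The step counter is threaded in and out of train_with_feed so that logging and checkpoint intervals carry over across epochs. A minimal driver sketch, assuming an epoch-count argument named num_scan_data (a hypothetical name standing in for whatever the surrounding script defines):

    step = 0
    for epoch in six.moves.xrange(args.num_scan_data):
        step, last_cost = train_with_feed(step)
        print("epoch %d done, last batch loss: %f" % (epoch, last_cost))
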
Code Example #2
def test_with_feed(exe, program, feed_names, fetch_list, score_path, batches,
                   batch_num, dev_count):
    """
    Test with feed
    """
    score_file = open(score_path, 'w')
    for it in six.moves.xrange(batch_num // dev_count):
        feed_list = []
        for dev in six.moves.xrange(dev_count):
            val_index = it * dev_count + dev
            batch_data = reader.make_one_batch_input(batches, val_index)
            feed_dict = dict(zip(feed_names, batch_data))
            feed_list.append(feed_dict)

        # Run once per batch group, after feed_list holds one dict per device.
        predicts = exe.run(feed=feed_list, fetch_list=fetch_list)

        scores = np.array(predicts[0])
        for dev in six.moves.xrange(dev_count):
            val_index = it * dev_count + dev
            for i in six.moves.xrange(args.batch_size):
                # Each line pairs a predicted score with its gold label.
                score_file.write(
                    str(scores[args.batch_size * dev + i][0]) + '\t' +
                    str(batches["label"][val_index][i]) + '\n')
    score_file.close()
Code Example #3
def data_provider():
    """
    Data reader
    """
    for index in six.moves.xrange(batch_num):
        yield reader.make_one_batch_input(batches, index)
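
data_provider is a plain Python generator, so it can drive a simple feed loop directly. A minimal usage sketch, reusing the exe, dam, and loss names from Example #1 (an illustration only, not code from the original script):

    for batch_data in data_provider():
        feed_dict = dict(zip(dam.get_feed_names(), batch_data))
        cost = exe.run(feed=feed_dict, fetch_list=[loss.name])
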
Code Example #4
def test(args):
    """
    Test
    """
    if not os.path.exists(args.save_path):
        mkdir(args.save_path)
    if not os.path.exists(args.model_path):
        raise ValueError("Invalid model init path %s" % args.model_path)
    # data config
    data_conf = {
        "batch_size": args.batch_size,
        "max_turn_num": args.max_turn_num,
        "max_turn_len": args.max_turn_len,
        "_EOS_": args._EOS_,
    }

    dam = Net(args.max_turn_num, args.max_turn_len, args.vocab_size,
              args.emb_size, args.stack_num, args.channel1_num,
              args.channel2_num)
    dam.create_data_layers()
    loss, logits = dam.create_network()

    loss.persistable = True
    logits.persistable = True

    # gradient clipping
    fluid.clip.set_gradient_clip(
        clip=fluid.clip.GradientClipByValue(max=1.0, min=-1.0))

    test_program = fluid.default_main_program().clone(for_test=True)
    optimizer = fluid.optimizer.Adam(
        learning_rate=fluid.layers.exponential_decay(
            learning_rate=args.learning_rate,
            decay_steps=400,
            decay_rate=0.9,
            staircase=True))
    optimizer.minimize(loss)

    if args.use_cuda:
        place = fluid.CUDAPlace(0)
        dev_count = fluid.core.get_cuda_device_count()
    else:
        place = fluid.CPUPlace()
        # dev_count = multiprocessing.cpu_count()
        dev_count = 1

    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    # Restore parameters previously saved via fluid.io.save_persistables.
    fluid.io.load_persistables(exe, args.model_path)

    test_exe = fluid.ParallelExecutor(use_cuda=args.use_cuda,
                                      main_program=test_program)

    print("start loading data ...")
    with open(args.data_path, 'rb') as f:
        if six.PY2:
            train_data, val_data, test_data = pickle.load(f)
        else:
            train_data, val_data, test_data = pickle.load(f, encoding="bytes")
    print("finish loading data ...")

    test_batches = reader.build_batches(test_data, data_conf)

    test_batch_num = len(test_batches["response"])

    print("test batch num: %d" % test_batch_num)

    print("begin inference ...")
    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))

    score_path = os.path.join(args.save_path, 'score.txt')
    score_file = open(score_path, 'w')

    # Inference loop: each iteration feeds dev_count batches, one per device.
    for it in six.moves.xrange(test_batch_num // dev_count):
        feed_list = []
        for dev in six.moves.xrange(dev_count):
            index = it * dev_count + dev
            batch_data = reader.make_one_batch_input(test_batches, index)
            feed_dict = dict(zip(dam.get_feed_names(), batch_data))
            feed_list.append(feed_dict)

        predicts = test_exe.run(feed=feed_list, fetch_list=[logits.name])

        scores = np.array(predicts[0])
        print("step = %d" % it)

        for dev in six.moves.xrange(dev_count):
            index = it * dev_count + dev
            for i in six.moves.xrange(args.batch_size):
                score_file.write(
                    str(scores[args.batch_size * dev + i][0]) + '\t' +
                    str(test_batches["label"][index][i]) + '\n')

    score_file.close()

    # write evaluation result
    if args.ext_eval:
        result = eva.evaluate_douban(score_path)
    else:
        result = eva.evaluate_ubuntu(score_path)
    result_file_path = os.path.join(args.save_path, 'result.txt')
    with open(result_file_path, 'w') as out_file:
        for metric in result:
            out_file.write(metric + '\t' + str(result[metric]) + '\n')
    print('finish test')
    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
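
For completeness, test(args) reads its configuration from an attribute namespace. A hypothetical argparse entry point whose flag names simply mirror the attributes accessed above (the original script's CLI may differ, and the defaults here are placeholders):

    import argparse

    if __name__ == '__main__':
        parser = argparse.ArgumentParser()
        parser.add_argument('--model_path', type=str, required=True)
        parser.add_argument('--data_path', type=str, required=True)
        parser.add_argument('--save_path', type=str, default='./output')
        parser.add_argument('--use_cuda', action='store_true')
        parser.add_argument('--ext_eval', action='store_true')
        parser.add_argument('--batch_size', type=int, default=64)
        parser.add_argument('--vocab_size', type=int, required=True)
        parser.add_argument('--emb_size', type=int, default=200)
        parser.add_argument('--stack_num', type=int, default=5)
        parser.add_argument('--channel1_num', type=int, default=32)
        parser.add_argument('--channel2_num', type=int, default=16)
        parser.add_argument('--max_turn_num', type=int, default=9)
        parser.add_argument('--max_turn_len', type=int, default=50)
        parser.add_argument('--_EOS_', type=int, required=True)
        parser.add_argument('--learning_rate', type=float, default=0.001)
        test(parser.parse_args())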