Example #1
    print "Loaded"

    print "Training ===>"

    with tf.Graph().as_default():
        session_conf = tf.ConfigProto(allow_soft_placement=True,
                                      log_device_placement=False)

        sess = tf.Session(config=session_conf)

        with sess.as_default():

            char_cnn = CharConvNet(
                conv_layers=config.model.conv_layers,
                fully_layers=config.model.fully_connected_layers,
                l0=config.l0,
                alphabet_size=config.alphabet_size,
                no_of_classes=config.no_of_classes,
                th=config.model.th)

            global_step = tf.Variable(0, trainable=False)

            # boundaries = []
            # br = config.training.base_rate
            # values = [br]
            # for i in range(1, 10):
            #     values.append(br / (2 ** i))
            #     boundaries.append(15000 * i)
            # values.append(br / (2 ** (i + 1)))
            # print(values)
            # print(boundaries)
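            # If the schedule above were re-enabled, a typical next step (an
            # assumption about intent, not shown in the original snippet) would be
            # to feed it into a piecewise-constant learning rate driven by global_step:
            # learning_rate = tf.train.piecewise_constant(global_step, boundaries, values)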
Example #2
        train_data.get_length() / config.batch_size) + 1
    num_batch_dev = dev_data.get_length()
    print('Loaded')

    print('Training')

    with tf.Graph().as_default():
        session_conf = tf.ConfigProto(
            allow_soft_placement=True,  # if the requested device does not exist, let TF pick one automatically
            log_device_placement=False)  # do not log device placement
        sess = tf.Session(config=session_conf)

        with sess.as_default():
            char_cnn = CharConvNet(
                conv_layers=config.model.conv_layers,
                fully_layers=config.model.fully_connected_layers,
                max_length=config.max_length,
                no_of_classes=config.no_of_classes,
                th=config.model.th)
            global_step = tf.Variable(0, trainable=False)

            optimizer = tf.train.AdamOptimizer(config.learning_rate)
            # These two steps could be replaced by a single optimizer.minimize() call.
            grads_and_vars = optimizer.compute_gradients(
                char_cnn.loss)  # compute gradients (by default, for all trainable Variables)
            train_op = optimizer.apply_gradients(
                grads_and_vars, global_step=global_step)  # apply the gradient updates
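            # A minimal sketch of the single-step minimize() alternative mentioned
            # above (an assumption, not part of the original snippet):
            # train_op = optimizer.minimize(char_cnn.loss, global_step=global_step)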

            grad_summaries = []
            for g, v in grads_and_vars:
                if g is not None:
                    grad_hist_summary = tf.summary.histogram(