Example #1
0
from config import Config
import data_input
import numpy as np
import tensorflow as tf
import time
import random

# --- Global configuration, hyper-parameters and data loading ---
conf = Config()

# mini-batch size used for both training and dev evaluation
batch_size = 20
# fully-connected layer sizes (L1_N: hidden layer, L2_N: output embedding) — presumably fed to add_fc_layer; confirm against the training loop
L1_N = 300
L2_N = 128
# Load the Bag-of-Words (BOW) representation of the train / dev data
data_train = data_input.get_data_bow(conf.file_train)
data_dev = data_input.get_data_bow(conf.file_dev)
# full batches per epoch; the trailing "- 1" drops the final (possibly partial) batch
train_epoch_steps = int(len(data_train) / batch_size) - 1
dev_epoch_steps = int(len(data_dev) / batch_size) - 1


def add_fc_layer(inputs, n_input, n_output, activation=None):
    """Build one fully-connected layer: ``activation(inputs @ W + b)``.

    Weights and biases are initialized uniformly in ``[-wlimit, wlimit]``
    with the Xavier/Glorot limit ``sqrt(6 / (n_input + n_output))``.

    Args:
        inputs: 2-D tensor of shape ``(batch, n_input)``.
        n_input: input feature dimension.
        n_output: output feature dimension.
        activation: optional callable (e.g. ``tf.nn.relu``) applied to the
            affine output; ``None`` leaves the layer linear.

    Returns:
        Tensor of shape ``(batch, n_output)``.
    """
    # Xavier/Glorot uniform initialization bound.
    wlimit = np.sqrt(6.0 / (n_input + n_output))
    weights = tf.Variable(
        tf.random_uniform([n_input, n_output], -wlimit, wlimit))
    biases = tf.Variable(tf.random_uniform([n_output], -wlimit, wlimit))
    outputs = tf.matmul(inputs, weights) + biases
    if activation is not None:
        # BUG FIX: the original applied tf.nn.relu unconditionally whenever
        # `activation` was truthy, silently ignoring the callable the caller
        # passed in. Apply the requested activation instead.
        outputs = activation(outputs)
    return outputs

Example #2
0
# Fixed seed for reproducible negative sampling / shuffling
random.seed(9102)

# wall-clock timer for the whole run
start = time.time()
# whether to insert a batch-normalization layer, and its epsilon
norm, epsilon = False, 0.001

# negative sample
# query batch size
query_BS = 100
# fully-connected layer sizes (L1_N: hidden layer, L2_N: output embedding)
L1_N = 400
L2_N = 120

# Load the data (Bag-of-Words representation)
conf = Config()
data_train = data_input.get_data_bow(conf.file_train)
data_vali = data_input.get_data_bow(conf.file_vali)
# data_train = data_input.get_data(conf.file_train)
# data_vali = data_input.get_data(conf.file_vali)

# print(len(data_train['query']), query_BS, len(data_train['query']) / query_BS)
# full batches per epoch; the trailing "- 1" drops the final (possibly partial) batch
train_epoch_steps = int(len(data_train) / query_BS) - 1
vali_epoch_steps = int(len(data_vali) / query_BS) - 1


def add_layer(inputs, in_size, out_size, activation_function=None):
    wlimit = np.sqrt(6.0 / (in_size + out_size))
    Weights = tf.Variable(tf.random_uniform([in_size, out_size], -wlimit, wlimit))
    biases = tf.Variable(tf.random_uniform([out_size], -wlimit, wlimit))
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
Example #3
0
File: main.py  Project: lhtlht/dssm
# --- Paths, hyper-parameters and data loading ---
CUR = os.getcwd()
SUMMARY_DIR = os.path.join(CUR, 'summary')

# whether to use batch normalization, and its epsilon
norm, epsilon = False, 0.001
NUM_EPOCH = 5
# query batch size
query_bs = 100
# fully-connected layer sizes (L1_N: hidden layer, L2_N: output embedding)
L1_N = 400
L2_N = 120

# Load the data
file_train = os.path.join(CUR,
                          'oppo_search_round1/oppo_round1_train_20180929.txt')
file_vali = os.path.join(CUR,
                         'oppo_search_round1/oppo_round1_vali_20180929.txt')

# BUG FIX: the training set was previously loaded from `file_vali` (copy-paste
# error; `file_train` was built but never used), so the model trained on the
# validation data. Load the training file instead.
data_train, vocab_map = data_input.get_data_bow(file_train)
data_vali = data_input.get_data_bow(file_vali)
# full batches per epoch; the trailing "- 1" drops the final (possibly partial) batch
train_epoch_steps = int(len(data_train) / query_bs) - 1
vali_epoch_steps = int(len(data_vali) / query_bs) - 1
# vocabulary size — the dimension of the BOW input vectors
nwords = len(vocab_map)

def batch_normalization(x, phase_train, out_size):
    with tf.variable_scope('bn'):
        beta = tf.Variable(tf.constant(0.0, shape=[out_size]),
                           name='beta',
                           trainable=True)
        gamma = tf.Variable(tf.constant(1.0, shape=[out_size]),
                            name='gamma',
                            trainable=True)
        batch_mean, batch_var = tf.nn.moments(