def __init__(self, input_dim=None, output_dim=1, init_path=None, opt_algo='gd',
             learning_rate=1e-2, l2_weight=0, random_seed=None):
    Model.__init__(self)
    # Declare the parameters: a weight matrix and a bias vector.
    init_vars = [('w', [input_dim, output_dim], 'xavier', dtype),
                 ('b', [output_dim], 'zero', dtype)]
    self.graph = tf.Graph()
    with self.graph.as_default():
        if random_seed is not None:
            tf.set_random_seed(random_seed)
        # Use a sparse placeholder for the high-dimensional one-hot input.
        self.X = tf.sparse_placeholder(dtype)
        self.y = tf.placeholder(dtype)
        # Initialize the parameters (optionally restored from init_path).
        self.vars = init_var_map(init_vars, init_path)
        w = self.vars['w']
        b = self.vars['b']
        # Logistic regression: sigmoid(Xw + b).
        xw = tf.sparse_tensor_dense_matmul(self.X, w)
        logits = tf.reshape(xw + b, [-1])
        self.y_prob = tf.sigmoid(logits)

        self.loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=self.y, logits=logits)) + \
            l2_weight * tf.nn.l2_loss(xw)
        self.optimizer = get_optimizer(opt_algo, learning_rate, self.loss)

        # GPU configuration: allocate memory on demand instead of all at once.
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        # Initialize the variables in the graph.
        tf.global_variables_initializer().run(session=self.sess)
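# Usage sketch (not part of the original file): one training step for the
# logistic-regression graph above. `model` is assumed to be an instance of the
# class this __init__ belongs to; `indices`, `values`, `dense_shape`, and
# `labels` are hypothetical numpy arrays in tf.SparseTensorValue layout.
def lr_train_step(model, indices, values, dense_shape, labels):
    # Feed the sparse placeholder and run one optimizer step, returning the loss.
    feed = {
        model.X: tf.SparseTensorValue(indices, values, dense_shape),
        model.y: labels,
    }
    _, loss = model.sess.run([model.optimizer, model.loss], feed_dict=feed)
    return loss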
def __init__(self, field_sizes=None, embed_size=10, layer_sizes=None, layer_acts=None,
             drop_out=None, embed_l2=None, layer_l2=None, init_path=None, opt_algo='gd',
             learning_rate=1e-2, random_seed=None):
    Model.__init__(self)
    init_vars = []
    num_inputs = len(field_sizes)
    # One embedding matrix per field.
    for i in range(num_inputs):
        init_vars.append(('embed_%d' % i, [field_sizes[i], embed_size], 'xavier', dtype))
    num_pairs = int(num_inputs * (num_inputs - 1) / 2)
    # The first layer sees the concatenated embeddings plus one inner product per field pair.
    node_in = num_inputs * embed_size + num_pairs
    # node_in = num_inputs * (embed_size + num_inputs)
    for i in range(len(layer_sizes)):
        init_vars.append(('w%d' % i, [node_in, layer_sizes[i]], 'xavier', dtype))
        init_vars.append(('b%d' % i, [layer_sizes[i]], 'zero', dtype))
        node_in = layer_sizes[i]
    self.graph = tf.Graph()
    with self.graph.as_default():
        if random_seed is not None:
            tf.set_random_seed(random_seed)
        self.X = [tf.sparse_placeholder(dtype) for i in range(num_inputs)]
        self.y = tf.placeholder(dtype)
        self.keep_prob_train = 1 - np.array(drop_out)
        self.keep_prob_test = np.ones_like(drop_out)
        self.layer_keeps = tf.placeholder(dtype)
        self.vars = init_var_map(init_vars, init_path)
        w0 = [self.vars['embed_%d' % i] for i in range(num_inputs)]
        xw = tf.concat([tf.sparse_tensor_dense_matmul(self.X[i], w0[i])
                        for i in range(num_inputs)], 1)
        xw3d = tf.reshape(xw, [-1, num_inputs, embed_size])

        # Enumerate all field pairs (i, j) with i < j.
        row = []
        col = []
        for i in range(num_inputs - 1):
            for j in range(i + 1, num_inputs):
                row.append(i)
                col.append(j)
        # batch * pair * k
        p = tf.transpose(
            # pair * batch * k
            tf.gather(
                # num * batch * k
                tf.transpose(xw3d, [1, 0, 2]),
                row),
            [1, 0, 2])
        # batch * pair * k
        q = tf.transpose(
            tf.gather(
                tf.transpose(xw3d, [1, 0, 2]),
                col),
            [1, 0, 2])
        p = tf.reshape(p, [-1, num_pairs, embed_size])
        q = tf.reshape(q, [-1, num_pairs, embed_size])
        # Inner product of every embedding pair: batch * pair.
        ip = tf.reshape(tf.reduce_sum(p * q, [-1]), [-1, num_pairs])
        # Simple but redundant alternative (computes all n^2 products):
        # batch * n * 1 * k, batch * 1 * n * k
        # ip = tf.reshape(
        #     tf.reduce_sum(
        #         tf.expand_dims(xw3d, 2) * tf.expand_dims(xw3d, 1),
        #         3),
        #     [-1, num_inputs ** 2])
        l = tf.concat([xw, ip], 1)

        for i in range(len(layer_sizes)):
            wi = self.vars['w%d' % i]
            bi = self.vars['b%d' % i]
            l = tf.nn.dropout(
                activate(tf.matmul(l, wi) + bi, layer_acts[i]),
                self.layer_keeps[i])

        l = tf.squeeze(l)
        self.y_prob = tf.sigmoid(l)

        self.loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=l, labels=self.y))
        # Guard each regularizer on its own weight; the original checked only
        # layer_l2 and would fail if embed_l2 were None.
        if embed_l2 is not None:
            self.loss += embed_l2 * tf.nn.l2_loss(xw)
        if layer_l2 is not None:
            for i in range(len(layer_sizes)):
                wi = self.vars['w%d' % i]
                self.loss += layer_l2[i] * tf.nn.l2_loss(wi)
        self.optimizer = get_optimizer(opt_algo, learning_rate, self.loss)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        tf.global_variables_initializer().run(session=self.sess)
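# A minimal sketch (not from the original file) of the pairwise indexing trick
# above: (row[k], col[k]) enumerates every field pair i < j, so gathering rows
# and cols from the transposed embedding tensor lines p and q up pair-by-pair.
def pair_indices(num_inputs):
    # Enumerate all unordered field pairs (i, j) with i < j.
    row, col = [], []
    for i in range(num_inputs - 1):
        for j in range(i + 1, num_inputs):
            row.append(i)
            col.append(j)
    return row, col

# For example, pair_indices(4) == ([0, 0, 0, 1, 1, 2], [1, 2, 3, 2, 3, 3]),
# matching num_pairs = 4 * 3 / 2 = 6.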
def __init__(self, field_sizes=None, embed_size=10, filter_sizes=None, layer_acts=None,
             drop_out=None, init_path=None, opt_algo='gd', learning_rate=1e-2,
             random_seed=None):
    Model.__init__(self)
    init_vars = []
    num_inputs = len(field_sizes)
    for i in range(num_inputs):
        init_vars.append(('embed_%d' % i, [field_sizes[i], embed_size], 'xavier', dtype))
    # Two convolutional filters followed by a fully connected output layer.
    init_vars.append(('f1', [embed_size, filter_sizes[0], 1, 2], 'xavier', dtype))
    init_vars.append(('f2', [embed_size, filter_sizes[1], 2, 2], 'xavier', dtype))
    init_vars.append(('w1', [2 * 3 * embed_size, 1], 'xavier', dtype))
    init_vars.append(('b1', [1], 'zero', dtype))
    self.graph = tf.Graph()
    with self.graph.as_default():
        if random_seed is not None:
            tf.set_random_seed(random_seed)
        self.X = [tf.sparse_placeholder(dtype) for i in range(num_inputs)]
        self.y = tf.placeholder(dtype)
        self.keep_prob_train = 1 - np.array(drop_out)
        self.keep_prob_test = np.ones_like(drop_out)
        self.layer_keeps = tf.placeholder(dtype)
        self.vars = init_var_map(init_vars, init_path)
        w0 = [self.vars['embed_%d' % i] for i in range(num_inputs)]
        xw = tf.concat([tf.sparse_tensor_dense_matmul(self.X[i], w0[i])
                        for i in range(num_inputs)], 1)
        l = xw

        # Reshape to batch * k * num_inputs * 1 so the convolution slides over fields.
        l = tf.transpose(tf.reshape(l, [-1, num_inputs, embed_size, 1]), [0, 2, 1, 3])
        f1 = self.vars['f1']
        l = tf.nn.conv2d(l, f1, [1, 1, 1, 1], 'SAME')
        # Pool over the field axis via max_pool_4d, keeping num_inputs / 2 activations.
        l = tf.transpose(
            max_pool_4d(tf.transpose(l, [0, 1, 3, 2]), int(num_inputs / 2)),
            [0, 1, 3, 2])
        f2 = self.vars['f2']
        l = tf.nn.conv2d(l, f2, [1, 1, 1, 1], 'SAME')
        # Second pooling keeps 3 activations, matching the w1 input size below.
        l = tf.transpose(
            max_pool_4d(tf.transpose(l, [0, 1, 3, 2]), 3),
            [0, 1, 3, 2])
        l = tf.nn.dropout(
            activate(tf.reshape(l, [-1, embed_size * 3 * 2]), layer_acts[0]),
            self.layer_keeps[0])
        w1 = self.vars['w1']
        b1 = self.vars['b1']
        l = tf.matmul(l, w1) + b1

        l = tf.squeeze(l)
        self.y_prob = tf.sigmoid(l)

        self.loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=l, labels=self.y))
        self.optimizer = get_optimizer(opt_algo, learning_rate, self.loss)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        tf.global_variables_initializer().run(session=self.sess)
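# Sketch (assumption): `max_pool_4d` is defined elsewhere in this repo; the
# transposes above move the field axis into last position, suggesting k-max
# pooling over that axis. A plausible minimal version using tf.nn.top_k:
def max_pool_4d_sketch(x, k):
    # Keep the k largest activations along the last axis of a 4-D tensor.
    values, _ = tf.nn.top_k(x, k=k)
    return values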
def __init__(self, field_sizes=None, embed_size=10, layer_sizes=None, layer_acts=None,
             drop_out=None, embed_l2=None, layer_l2=None, init_path=None, opt_algo='gd',
             learning_rate=1e-2, random_seed=None, layer_norm=True):
    Model.__init__(self)
    init_vars = []
    num_inputs = len(field_sizes)
    for i in range(num_inputs):
        init_vars.append(('embed_%d' % i, [field_sizes[i], embed_size], 'xavier', dtype))
    num_pairs = int(num_inputs * (num_inputs - 1) / 2)
    node_in = num_inputs * embed_size + num_pairs
    # One bilinear kernel slice per field pair: k * pair * k.
    init_vars.append(('kernel', [embed_size, num_pairs, embed_size], 'xavier', dtype))
    for i in range(len(layer_sizes)):
        init_vars.append(('w%d' % i, [node_in, layer_sizes[i]], 'xavier', dtype))
        init_vars.append(('b%d' % i, [layer_sizes[i]], 'zero', dtype))
        node_in = layer_sizes[i]
    self.graph = tf.Graph()
    with self.graph.as_default():
        if random_seed is not None:
            tf.set_random_seed(random_seed)
        self.X = [tf.sparse_placeholder(dtype) for i in range(num_inputs)]
        self.y = tf.placeholder(dtype)
        self.keep_prob_train = 1 - np.array(drop_out)
        self.keep_prob_test = np.ones_like(drop_out)
        self.layer_keeps = tf.placeholder(dtype)
        self.vars = init_var_map(init_vars, init_path)
        w0 = [self.vars['embed_%d' % i] for i in range(num_inputs)]
        xw = tf.concat([tf.sparse_tensor_dense_matmul(self.X[i], w0[i])
                        for i in range(num_inputs)], 1)
        xw3d = tf.reshape(xw, [-1, num_inputs, embed_size])

        # Enumerate all field pairs (i, j) with i < j.
        row = []
        col = []
        for i in range(num_inputs - 1):
            for j in range(i + 1, num_inputs):
                row.append(i)
                col.append(j)
        # batch * pair * k
        p = tf.transpose(
            # pair * batch * k
            tf.gather(
                # num * batch * k
                tf.transpose(xw3d, [1, 0, 2]),
                row),
            [1, 0, 2])
        # batch * pair * k
        q = tf.transpose(
            tf.gather(
                tf.transpose(xw3d, [1, 0, 2]),
                col),
            [1, 0, 2])
        # b * p * k
        p = tf.reshape(p, [-1, num_pairs, embed_size])
        # b * p * k
        q = tf.reshape(q, [-1, num_pairs, embed_size])
        # k * p * k
        k = self.vars['kernel']

        # batch * 1 * pair * k
        p = tf.expand_dims(p, 1)
        # Kernel product: kp[b, p] = sum_{i,j} q[b,p,i] * kernel[i,p,j] * p[b,p,j].
        # batch * pair
        kp = tf.reduce_sum(
            # batch * pair * k
            tf.multiply(
                # batch * pair * k
                tf.transpose(
                    # batch * k * pair
                    tf.reduce_sum(
                        # batch * k * pair * k
                        tf.multiply(p, k),
                        -1),
                    [0, 2, 1]),
                q),
            -1)

        # Disabled layer-norm experiment, kept for reference; `op` belonged to
        # an earlier variant of this code:
        # if layer_norm:
        #     x_mean, x_var = tf.nn.moments(xw, [1], keep_dims=True)
        #     xw = (xw - x_mean) / tf.sqrt(x_var)
        #     x_g = tf.Variable(tf.ones([num_inputs * embed_size]), name='x_g')
        #     x_b = tf.Variable(tf.zeros([num_inputs * embed_size]), name='x_b')
        #     xw = xw * x_g + x_b
        #     p_mean, p_var = tf.nn.moments(op, [1], keep_dims=True)
        #     op = (op - p_mean) / tf.sqrt(p_var)
        #     p_g = tf.Variable(tf.ones([embed_size ** 2]), name='p_g')
        #     p_b = tf.Variable(tf.zeros([embed_size ** 2]), name='p_b')
        #     op = op * p_g + p_b

        l = tf.concat([xw, kp], 1)
        for i in range(len(layer_sizes)):
            wi = self.vars['w%d' % i]
            bi = self.vars['b%d' % i]
            l = tf.nn.dropout(
                activate(tf.matmul(l, wi) + bi, layer_acts[i]),
                self.layer_keeps[i])

        l = tf.squeeze(l)
        self.y_prob = tf.sigmoid(l)

        self.loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=l, labels=self.y))
        # Guard each regularizer on its own weight; the original checked only
        # layer_l2 and would fail if embed_l2 were None.
        if embed_l2 is not None:
            self.loss += embed_l2 * tf.nn.l2_loss(xw)  # alternatively: tf.concat(w0, 0)
        if layer_l2 is not None:
            for i in range(len(layer_sizes)):
                wi = self.vars['w%d' % i]
                self.loss += layer_l2[i] * tf.nn.l2_loss(wi)
        self.optimizer = get_optimizer(opt_algo, learning_rate, self.loss)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        tf.global_variables_initializer().run(session=self.sess)
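# A minimal numpy check (not from the original file) of the kernel product
# above, written as one einsum: kp[b, p] = sum_{i,j} q[b,p,i] * kernel[i,p,j]
# * p[b,p,j], i.e. a bilinear form with a separate kernel slice per field
# pair. All shapes here are hypothetical.
def kernel_product_check():
    batch, num_pairs, embed_size = 2, 6, 4
    p = np.random.rand(batch, num_pairs, embed_size)
    q = np.random.rand(batch, num_pairs, embed_size)
    kernel = np.random.rand(embed_size, num_pairs, embed_size)
    # Same contraction as the multiply / reduce_sum / transpose chain above.
    kp = np.einsum('bpi,ipj,bpj->bp', q, kernel, p)
    assert kp.shape == (batch, num_pairs)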
def __init__(self, field_sizes=None, embed_size=10, layer_sizes=None, layer_acts=None,
             drop_out=None, embed_l2=None, layer_l2=None, init_path=None, opt_algo='gd',
             learning_rate=1e-2, random_seed=None):
    Model.__init__(self)
    init_vars = []
    num_inputs = len(field_sizes)
    for i in range(num_inputs):
        init_vars.append(('embed_%d' % i, [field_sizes[i], embed_size], 'xavier', dtype))
    # Fully connected layers on top of the concatenated field embeddings.
    node_in = num_inputs * embed_size
    for i in range(len(layer_sizes)):
        init_vars.append(('w%d' % i, [node_in, layer_sizes[i]], 'xavier', dtype))
        init_vars.append(('b%d' % i, [layer_sizes[i]], 'zero', dtype))
        node_in = layer_sizes[i]
    self.graph = tf.Graph()
    with self.graph.as_default():
        if random_seed is not None:
            tf.set_random_seed(random_seed)
        self.X = [tf.sparse_placeholder(dtype) for i in range(num_inputs)]
        self.y = tf.placeholder(dtype)
        self.keep_prob_train = 1 - np.array(drop_out)
        self.keep_prob_test = np.ones_like(drop_out)
        self.layer_keeps = tf.placeholder(dtype)
        self.vars = init_var_map(init_vars, init_path)
        w0 = [self.vars['embed_%d' % i] for i in range(num_inputs)]
        xw = tf.concat([tf.sparse_tensor_dense_matmul(self.X[i], w0[i])
                        for i in range(num_inputs)], 1)
        l = xw

        for i in range(len(layer_sizes)):
            wi = self.vars['w%d' % i]
            bi = self.vars['b%d' % i]
            l = tf.nn.dropout(
                activate(tf.matmul(l, wi) + bi, layer_acts[i]),
                self.layer_keeps[i])

        l = tf.squeeze(l)
        self.y_prob = tf.sigmoid(l)

        self.loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=l, labels=self.y))
        # Guard each regularizer on its own weight; the original checked only
        # layer_l2 and would fail if embed_l2 were None.
        if embed_l2 is not None:
            self.loss += embed_l2 * tf.nn.l2_loss(xw)
        if layer_l2 is not None:
            for i in range(len(layer_sizes)):
                wi = self.vars['w%d' % i]
                self.loss += layer_l2[i] * tf.nn.l2_loss(wi)
        self.optimizer = get_optimizer(opt_algo, learning_rate, self.loss)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        tf.global_variables_initializer().run(session=self.sess)
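# Sketch (assumption: `model` is an instance of one of the models above):
# dropout is driven entirely by the layer_keeps placeholder, so a caller
# switches between training and evaluation by feeding the matching keep
# probabilities.
def dropout_feed(model, training):
    # keep_prob_train = 1 - drop_out during training; all ones at test time.
    keeps = model.keep_prob_train if training else model.keep_prob_test
    return {model.layer_keeps: keeps}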