Example #1
    def inference_net(self, C, reuse=None):
        # input
        h = tf.nn.relu(tf.sparse_matmul(C, self.inference_params[0], a_is_sparse=True))
        # intermediate hidden layer
        for l in range(1, self.L):
            h = tf.nn.relu(tf.sparse_matmul(h, self.inference_params[l], a_is_sparse=True))

        # output
        beta = tf.nn.softplus(tf.matmul(h, self.inference_params[self.L], a_is_sparse=True)) + 0.3
        alpha = tf.nn.softplus(tf.matmul(h, self.inference_params[self.L + 1], a_is_sparse=True)) + 0.3

        return alpha, beta
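
# Editor's note: a minimal, self-contained sketch of the hint-based API the
# examples on this page rely on (assuming TensorFlow 1.x). Both operands of
# tf.sparse_matmul are ordinary dense float32/bfloat16 tensors; a_is_sparse /
# b_is_sparse only hint that an operand is mostly zeros so a sparsity-aware
# kernel can be chosen. The result matches tf.matmul.
import numpy as np
import tensorflow as tf

a = tf.constant(np.eye(4, dtype=np.float32))        # dense tensor, mostly zeros
b = tf.constant(np.ones((4, 3), dtype=np.float32))
c = tf.sparse_matmul(a, b, a_is_sparse=True)        # same values as tf.matmul(a, b)

with tf.Session() as sess:
    print(sess.run(c))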
Example #2
 def nn_decision_tree(self, x, cut_points_list, leaf_score):
     # cut_points_list contains the cut_points for each dimension of feature
     return tf.sparse_matmul(reduce(
         self.tf_kron_prod,
         map(lambda z: self.tf_bin(x[:, z[0]:z[0] + 1], z[1]),
             enumerate(cut_points_list))),
                             leaf_score,
                             a_is_sparse=True,
                             b_is_sparse=True)
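
# Editor's note: tf_kron_prod and tf_bin (methods on self in the snippet) are not
# shown above; a sketch of what they typically look like in this neural-decision-tree
# construction, consistent with the call sites (soft binning per feature, then a
# per-row Kronecker product). Shapes and the temperature value are illustrative
# assumptions, and cut_points is assumed to be a sorted 1-D float tensor.
import tensorflow as tf

def tf_kron_prod(a, b):
    # (batch, n) x (batch, m) -> (batch, n * m), a per-row outer product
    res = tf.einsum('ij,ik->ijk', a, b)
    return tf.reshape(res, [-1, int(a.shape[1]) * int(b.shape[1])])

def tf_bin(x, cut_points, temperature=0.1):
    # soft one-hot binning of a single feature column against the cut points
    num_cuts = int(cut_points.shape[0])
    W = tf.reshape(tf.linspace(1.0, num_cuts + 1.0, num_cuts + 1), [1, -1])
    b = tf.cumsum(tf.concat([tf.zeros([1]), -cut_points], 0))
    return tf.nn.softmax((tf.matmul(x, W) + b) / temperature)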
Example #3
    def generative_net(self, Z, reuse=None):

        # with tf.variable_scope("generative",reuse=reuse):
        if self.w_determinist:
            h2 = tf.nn.relu(tf.matmul(Z, self.generator_params[0]))
            for l in range(1, self.L):
                h2 = tf.nn.relu(tf.sparse_matmul(h2, self.generator_params[l], a_is_sparse=True))
            d_x = tf.nn.sigmoid(tf.matmul(h2, self.generator_params[self.L], a_is_sparse=True))
        else:
            e = tf.random_normal(tf.shape(self.generator_params[0]), dtype=tf.float32, mean=0., stddev=1.0,
                                 name='epsilon')
            h2 = tf.nn.relu(tf.matmul(Z, self.generator_params[0] + 0.01 * e))
            for l in range(1, self.L):
                e = tf.random_normal(tf.shape(self.generator_params[l]), dtype=tf.float32, mean=0., stddev=1.0,
                                     name='epsilon')
                h2 = tf.nn.relu(tf.sparse_matmul(h2, self.generator_params[l] + 0.01 * e, a_is_sparse=True))
            e = tf.random_normal(tf.shape(self.generator_params[self.L]), dtype=tf.float32, mean=0., stddev=1.0,
                                 name='epsilon')
            d_x = tf.nn.sigmoid(tf.matmul(h2, self.generator_params[self.L] + 0.01 * e, a_is_sparse=True))
        return d_x
Example #4
    def InitFM(self, output_type="finish"):
        '''
        Initialize the TensorFlow graph.
        :param output_type: (str) "finish" or "like"
        :return: logits tensor
        '''
        # Define weights
        with tf.variable_scope('weights'+output_type):
            W0 = tf.Variable(0.1, name="W0")
            W1 = tf.Variable(tf.truncated_normal([self.embed_num]), name="W1")
            W2 = tf.Variable(tf.truncated_normal([self.embed_num, self.embedding_size]), name="W2")

        # Calculate the linear part
        with tf.variable_scope("linear"+output_type):
            # y_linear shape(batch_size, 1)
            y_linear = tf.sparse_matmul(a=self.input_sparse,
                                        b=W1,
                                        transpose_b=True,
                                        a_is_sparse=True,
                                        name="y_linear")

        # Calculate the cross part
        with tf.variable_scope("cross"+output_type):
            # part1 shape(batch_size, embedding_size)
            part1 = tf.matmul(a=self.input_sparse,
                              b=W2,
                              a_is_sparse=True,
                              name="part1")
            part1_square = tf.square(part1, name="cross1_square")

            # part2 shape(batch_size, embedding_size)
            W2_square = tf.square(W2,name="W2_square")
            input_sparse_square = tf.math.square(self.input_sparse,name="input_square")
            part2 = tf.matmul(a=input_sparse_square,
                              b=W2_square,
                              a_is_sparse=True,
                              name="part2")

            # apply sub
            sub = tf.subtract(part1_square, part2, name="part1_part2_sub")

            # get cross output, shape(batch_size, 1)
            cross_out = 0.5 * tf.reduce_sum(sub, reduction_indices=1,
                                            keep_dims=True,
                                            name="cross_out")

        # Calculate the final result
        with tf.variable_scope("output"+output_type):
            logits = W0 + y_linear + cross_out

        return logits
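
# Editor's note: the "cross" block above relies on the standard factorization-machine
# identity  sum_{i<j} <v_i, v_j> x_i x_j = 0.5 * sum_f [ (xV)_f^2 - (x^2 V^2)_f ],
# which is why only two matmuls and a subtraction are needed. A small NumPy check of
# that identity with made-up shapes:
import numpy as np

rng = np.random.RandomState(0)
x = rng.rand(5)          # one example, 5 features
V = rng.rand(5, 3)       # factor vectors of size 3

lhs = sum(np.dot(V[i], V[j]) * x[i] * x[j]
          for i in range(5) for j in range(i + 1, 5))
rhs = 0.5 * np.sum(np.dot(x, V) ** 2 - np.dot(x ** 2, V ** 2))
assert np.isclose(lhs, rhs)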
Example #5
def fc_layer(x, output_num, activation='none', dropout=None, name='fc'):
    weight = tf.get_variable(
        name, [47236, output_num],
        initializer=tf.random_normal_initializer(stddev=0.01))
    bias = tf.Variable(tf.zeros(output_num))

    y = tf.sparse_matmul(x, weight, a_is_sparse=True)
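    # editor's note: tf.sparse_add expects at least one SparseTensor operand; with
    # both `y` and `bias` dense here, a plain `y + bias` (or tf.nn.bias_add) is the
    # call that would actually work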
    y = tf.sparse_add(y, bias)

    if activation == 'relu':
        y = tf.nn.relu(y)
    elif activation == 'sigmoid':
        y = tf.nn.sigmoid(y)
    elif activation == 'none':
        pass

    if dropout is not None:
        y = tf.nn.dropout(y, dropout)
    return y
Example #6
print("check")
indices = sorted(list(set(indices)), key=lambda x: (x[0], x[1]))
print("check")
enum = len(indices)
print("check")
values = np.ones(enum, dtype=np.float32)
print("check")

spmat = tf.SparseTensor(indices,values,[nnum, nnum])

print("check")
var = tf.Variable(np.random.rand(nnum,K).astype(np.float32))
mat = tf.constant(np.random.rand(K,K).astype(np.float32))
print("check")

mul2 = tf.sparse_matmul(spmat, var)
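# editor's note: spmat is a tf.SparseTensor, but tf.sparse_matmul takes dense
# tensors with sparsity hints; the SparseTensor-by-dense product would normally
# be written as tf.sparse_tensor_dense_matmul(spmat, var)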

targets = tf.placeholder(name="target", dtype="bool", shape=[nnum])

spmat_t = tf.boolean_mask(spmat, targets)
var_t = tf.boolean_mask(var, targets)

mul = tf.matmul(var_t, tf.matmul(spmat_t, var_t, a_is_sparse=True, transpose_a=True), transpose_b=True)

print("check")
loss = tf.nn.l2_loss(mul)
init = tf.initialize_all_variables()
sess = tf.InteractiveSession()
sess.run(init)
import pdb; pdb.set_trace()
Example #7
    def __init__(self,
                 params,
                 init_embeddings,
                 init_weights,
                 G,
                 modify_list,
                 num_new=1):
        self.num_nodes_init, self.embedding_size = init_embeddings.shape
        self.num_nodes = self.num_nodes_init + num_new
        self.num_modify = len(modify_list)

        self.batch_size = params["batch_size"]
        self.learn_rate = params["learn_rate"]
        self.optimizer = params[
            "optimizer"] if "optimizer" in params else "GradientDescentOptimizer"
        self.tol = params["tol"] if "tol" in params else 0.0001
        self.neighbor_size = params["neighbor_size"]
        self.negative_distortion = params["negative_distortion"]
        self.num_sampled = params["num_sampled"]
        self.epoch_num = params["epoch_num"]
        self.lbd = params["lambda"]

        self.bs = __import__(
            "batch_strategy." + params["batch_strategy"]["func"],
            fromlist=["batch_strategy"
                      ]).BatchStrategy(G, num_new, modify_list,
                                       params["batch_strategy"])

        unigrams_in = None
        if "in_negative_sampling_distribution" in params:
            unigrams_in = getattr(
                dh, params["in_negative_sampling_distribution"]["func"])(
                    G, params["in_negative_sampling_distribution"])
        unigrams_out = None
        if "out_negative_sampling_distribution" in params:
            unigrams_out = getattr(
                dh, params["out_negative_sampling_distribution"]["func"])(
                    G, params["out_negative_sampling_distribution"])

        self.tensor_graph = tf.Graph()

        with self.tensor_graph.as_default():
            tf.set_random_seed(157)
            self.init_embeddings = tf.constant(init_embeddings)
            self.init_weights = tf.constant(init_weights)

            self.mask_matrix = tf.sparse_to_dense(
                sparse_indices=[(modify_list[i], i)
                                for i in xrange(self.num_modify)],
                output_shape=[self.num_nodes_init, self.num_modify],
                sparse_values=1.0,
                validate_indices=False)

            self.delta_embeddings_pre = tf.Variable(tf.random_uniform(
                [self.num_modify, self.embedding_size], -1.0, 1.0),
                                                    dtype=tf.float32)
            self.delta_weights_pre = tf.Variable(tf.random_uniform(
                [self.num_modify, self.embedding_size], -1.0, 1.0),
                                                 dtype=tf.float32)
            self.delta_embeddings = tf.sparse_matmul(self.mask_matrix,
                                                     self.delta_embeddings_pre,
                                                     a_is_sparse=True)
            self.delta_weights = tf.sparse_matmul(self.mask_matrix,
                                                  self.delta_weights_pre,
                                                  a_is_sparse=True)

            self.embeddings = self.init_embeddings + self.delta_embeddings
            self.weights = self.init_weights + self.delta_weights

            self.x_in = tf.placeholder(tf.int64, shape=[None])
            self.x_out = tf.placeholder(tf.int64, shape=[None])
            self.labels_in = tf.placeholder(tf.int64,
                                            shape=[None, self.neighbor_size])
            self.labels_out = tf.placeholder(tf.int64,
                                             shape=[None, self.neighbor_size])

            self.new_embeddings = tf.Variable(tf.random_uniform(
                [num_new, self.embedding_size], -1.0, 1.0),
                                              dtype=tf.float32)
            self.new_weights = tf.Variable(tf.random_uniform(
                [num_new, self.embedding_size], -1.0, 1.0),
                                           dtype=tf.float32)
            self.nce_biases = tf.zeros([self.num_nodes], tf.float32)

            self.embed = tf.concat([self.embeddings, self.new_embeddings], 0)
            self.w = tf.concat([self.weights, self.new_weights], 0)

            self.delta_embeddings_pad = tf.concat([
                self.delta_embeddings,
                tf.zeros([num_new, self.embedding_size], dtype=tf.float32)
            ],
                                                  axis=0)
            self.delta_weights_pad = tf.concat([
                self.delta_weights,
                tf.zeros([num_new, self.embedding_size], dtype=tf.float32)
            ],
                                               axis=0)

            self.embedding_batch = tf.nn.embedding_lookup(
                self.embed, self.x_in)
            self.weight_batch = tf.nn.embedding_lookup(self.w, self.x_out)
            self.delta_embeddings_batch = tf.nn.embedding_lookup(
                self.delta_embeddings_pad, self.x_in)
            self.delta_weights_batch = tf.nn.embedding_lookup(
                self.delta_weights_pad, self.x_out)

            if unigrams_in is None:
                self.loss_in = tf.reduce_mean(
                    tf.nn.nce_loss(weights=self.w,
                                   biases=self.nce_biases,
                                   labels=self.labels_in,
                                   inputs=self.embedding_batch,
                                   num_sampled=self.num_sampled,
                                   num_classes=self.num_nodes,
                                   num_true=self.neighbor_size))
            else:
                self.sampled_values_in = tf.nn.fixed_unigram_candidate_sampler(
                    true_classes=self.labels_in,
                    num_true=self.neighbor_size,
                    num_sampled=self.num_sampled,
                    unique=False,
                    range_max=self.num_nodes,
                    distortion=self.negative_distortion,
                    unigrams=unigrams_in)
                self.loss_in = tf.reduce_mean(
                    tf.nn.nce_loss(weights=self.w,
                                   biases=self.nce_biases,
                                   labels=self.labels_in,
                                   inputs=self.embedding_batch,
                                   num_sampled=self.num_sampled,
                                   num_classes=self.num_nodes,
                                   num_true=self.neighbor_size,
                                   sampled_values=self.sampled_values_in))

            if unigrams_out is None:
                self.loss_out = tf.reduce_mean(
                    tf.nn.nce_loss(weights=self.embed,
                                   biases=self.nce_biases,
                                   labels=self.labels_out,
                                   inputs=self.weight_batch,
                                   num_sampled=self.num_sampled,
                                   num_classes=self.num_nodes,
                                   num_true=self.neighbor_size))
            else:
                self.sampled_values_out = tf.nn.fixed_unigram_candidate_sampler(
                    true_classes=self.labels_out,
                    num_true=self.neighbor_size,
                    num_sampled=self.num_sampled,
                    unique=False,
                    range_max=self.num_nodes,
                    distortion=self.negative_distortion,
                    unigrams=unigrams_out)
                self.loss_out = tf.reduce_mean(
                    tf.nn.nce_loss(weights=self.embed,
                                   biases=self.nce_biases,
                                   labels=self.labels_out,
                                   inputs=self.weight_batch,
                                   num_sampled=self.num_sampled,
                                   num_classes=self.num_nodes,
                                   num_true=self.neighbor_size,
                                   sampled_values=self.sampled_values_out))

            self.loss = self.loss_out + self.loss_in + self.lbd * (
                tf.norm(self.delta_embeddings_batch) +
                tf.norm(self.delta_weights_batch))
            self.train_step = getattr(tf.train, self.optimizer)(
                self.learn_rate).minimize(self.loss)
Example #8
                                           att_neigh[self.adj.indices[i][1]])
                          for n in range(int(self.adj.indices.shape[0]))
                      ],
                      dense_shape=self.adj.dense_shape)

list(D_sp.indices).eval()

A_sp.eval()
D_sp.eval()
D = tf.constant([[1, 2, 3, 4, 5, 6, 7, 8, 9]], dtype=tf.int64)
cc = tf.sparse_tensor_dense_matmul(A_sp, tf.transpose(D))
cc.eval()

c = tf.sparse_tensor_dense_matmul(A_sp, D_sp)

c = tf.sparse_matmul(A_sp, D_sp, a_is_sparse=True, b_is_sparse=True)

c = tf.matmul(tf.sparse_tensor_to_dense(A_sp, 0),
              tf.sparse_tensor_to_dense(D_sp, 0),
              a_is_sparse=True,
              b_is_sparse=True)

tf.sparse_tensor_to_dense(A_sp, 0)
tf.sparse_tensor_to_dense(D_sp, 0)

sess.run(A_sp.dense_shape)
sess.run(D_sp.dense_shape)

len(G.nodes())

dim_in = 8
    def __init__(self,
                 inputh,
                 n_in,
                 n_out,
                 mat_enc,
                 middle_activation=tf.nn.relu,
                 final_activation=tf.nn.sigmoid):

        self.n_in = n_in
        self.n_out = n_out
        #self.chromo = chromo
        self.mat_enc = mat_enc

        self.input = inputh

        self.con_mat_var_map = {}
        self.wei_mat_var_map = {}
        for key in self.mat_enc.CMatrix.keys():
            self.con_mat_var_map[key] = tf.Variable(
                initial_value=self.mat_enc.CMatrix[key].astype('float32'),
                name='con_mat' + key,
                dtype=tf.float32)
            self.wei_mat_var_map[key] = tf.Variable(
                initial_value=self.mat_enc.WMatrix[key],
                name='con_mat' + key,
                dtype=tf.float32)

        to_effec_mat_node_map = {}
        for key in self.con_mat_var_map.keys():
            to_effec_mat_node_map[key] = opt_compwise_multiply(
                self.con_mat_var_map[key], self.wei_mat_var_map[key])

        density_map = {}
        for key in to_effec_mat_node_map.keys():
            density_map[key] = find_density(to_effec_mat_node_map[key])
        self.bias_wei_arr = np.array(
            [item.weight for item in self.mat_enc.Bias_conn_arr])
        self.bias_var = tf.Variable(initial_value=self.bias_wei_arr,
                                    name="bias",
                                    dtype=tf.float32)

        input_till_H2 = None

        if 'IH1' in to_effec_mat_node_map.keys():
            input_till_H1 = middle_activation(
                tf.sparse_matmul(self.input,
                                 to_effec_mat_node_map['IH1'],
                                 b_is_sparse=True))

        if 'IH2' in to_effec_mat_node_map.keys():
            input_till_H2 = tf.sparse_matmul(self.input,
                                             to_effec_mat_node_map['IH2'],
                                             b_is_sparse=True)

        if 'H1H2' in to_effec_mat_node_map.keys():
            assert ('IH1' in to_effec_mat_node_map.keys())
            twoh = tf.sparse_matmul(input_till_H1,
                                    to_effec_mat_node_map['H1H2'],
                                    b_is_sparse=True)
            if 'IH2' in to_effec_mat_node_map.keys():
                input_till_H2 = tf.add(twoh, input_till_H2)

            else:
                input_till_H2 = twoh

        if input_till_H2 is not None:
            input_till_H2 = middle_activation(input_till_H2)

        output = None
        if 'H2O' in to_effec_mat_node_map.keys():
            assert ('IH2' in to_effec_mat_node_map.keys()
                    or 'H1H2' in to_effec_mat_node_map.keys())
            threeh = tf.sparse_matmul(input_till_H2,
                                      to_effec_mat_node_map['H2O'],
                                      b_is_sparse=True)

            output = threeh

        if 'H1O' in to_effec_mat_node_map.keys():
            assert ('IH1' in to_effec_mat_node_map.keys())
            fourh = tf.sparse_matmul(input_till_H1,
                                     to_effec_mat_node_map['H1O'],
                                     b_is_sparse=True)

            if output is not None:
                output = tf.add(output, fourh)
            else:
                output = fourh

        if 'IO' in to_effec_mat_node_map.keys():
            assert ('IO' in to_effec_mat_node_map.keys())
            fifth = tf.sparse_matmul(self.input,
                                     to_effec_mat_node_map['IO'],
                                     b_is_sparse=True)
            if output is not None:
                output = tf.add(output, fifth)
            else:
                output = fifth

        output = final_activation(output)
        """input_till_H2 = middle_activation(
                            tf.add(
                                tf.sparse_matmul(self.input, to_effec_mat_node_map['IH2'], b_is_sparse = True),
                                tf.sparse_matmul( input_till_H1, to_effec_mat_node_map['H1H2'], b_is_sparse = True)
                            )
                        )


        output    = final_activation(
                        tf.add(
                            tf.add(
                                    tf.add(
                                            tf.sparse_matmul(input_till_H2, to_effec_mat_node_map['H2O'], b_is_sparse = True ),
                                            tf.sparse_matmul(input_till_H1,to_effec_mat_node_map['H1O'], b_is_sparse = True)
                                    ),
                                    tf.sparse_matmul(self.input, to_effec_mat_node_map['IO'] , b_is_sparse = True)
                            ),

                            self.bias_var
                        )
                    )
        """
        self.p_y_given_x = output

        half = tf.constant(0.5, dtype=self.p_y_given_x.dtype)
        if int(self.bias_wei_arr.shape[0]) != 1:
            self.y_pred = tf.argmax(self.p_y_given_x, axis=1)
        else:
            half = tf.constant(0.5, dtype=self.p_y_given_x.dtype)
            dadum = tf.constant(0.5, dtype=self.p_y_given_x.dtype)
            q = tf.scan(lambda last, current: current[0],
                        elems=self.p_y_given_x,
                        initializer=dadum)
            s = tf.scan(lambda y, x: tf.greater_equal(x, half),
                        elems=q,
                        initializer=False)
            #print("herehrerhehrehrehrehrhe", s)
            # print("hi",s)
            self.y_pred = tf.cast(s, dtype=tf.int32)

        self.params = [
            self.wei_mat_var_map[key] for key in self.wei_mat_var_map.keys()
        ] + [self.bias_var]
Example #10
tf.to_int64()

tf.trace()
tf.trainable_variables()
tf.transpose()
tf.truncated_normal()
tf.truediv()
tf.sparse_transpose()
tf.sparse_tensor_dense_matmul()
tf.sparse_accumulator_apply_gradient()
tf.sparse_accumulator_take_gradient()
tf.sparse_add()
tf.sparse_concat()
tf.sparse_conditional_accumulator()
tf.sparse_mask()
tf.sparse_matmul()
tf.sparse_maximum()
tf.sparse_merge()
tf.sparse_minimum()

tf.sparse_reduce_max()
tf.sparse_reduce_max_sparse()

tf.reduce_all()
tf.reduce_any()
tf.reduce_join()
tf.reduce_logsumexp()
tf.reduce_max()
tf.reduce_mean()
tf.reduce_min()
tf.reduce_prod()
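
# Editor's note: the listing above only names the ops. A minimal sketch wiring a
# couple of the sparse ones together (TensorFlow 1.x, made-up values):
import tensorflow as tf

st1 = tf.SparseTensor(indices=[[0, 0]], values=[1.0], dense_shape=[2, 2])
st2 = tf.SparseTensor(indices=[[1, 1]], values=[3.0], dense_shape=[2, 2])
both = tf.sparse_add(st1, st2)                  # still a SparseTensor
row_max = tf.sparse_reduce_max(both, axis=1)    # dense tensor of shape [2]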
Example #11
import os
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TensorFlow warnings
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
x = tf.placeholder("float", [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

y = tf.nn.softmax(tf.sparse_matmul(x, W) + b)
y_ = tf.placeholder("float", [None, 10])

# launch the module in a session
#sess = tf.Session()
sess = tf.InteractiveSession()

# cross-entropy cost function
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))

# use backpropagation algorithm to minimize, 0.01: learning rate
# gradient descent algorithm
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
# initialize variable
#init = tf.global_variables_initializer()
# evaluate model
with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    with tf.name_scope('accuracy'):
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
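
# Editor's note: the snippet above builds train_step and accuracy but stops before
# the training loop; a minimal sketch of the usual loop, assuming the mnist, x and
# y_ objects defined above:
sess.run(tf.global_variables_initializer())
for _ in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))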
def system_mat_fields(fwd_model, elec_imped=False):
    p = fwd_model_parameters(fwd_model)
    d0 = p['n_dims'] + 0
    d1 = p['n_dims'] + 1
    e = p['n_elem']
    n = p['n_node']
    num_elc = p['n_elec']
    FF_shape = [d0 * e, d1 * e]
    CC_shape = [d1 * e, n]

    FFjidx = np.floor(
        np.arange(d0 * e).T.reshape([d0 * e, 1]) / d0) * d1 * np.ones(
            (1, d1)) + np.ones(
                (d0 * e, 1)).reshape([d0 * e, 1]) * np.arange(1, d1 + 1)
    FFiidx = np.arange(1, d0 * e + 1).T.reshape([d0 * e, 1]) * np.ones((1, d1))
    FFdata = np.zeros([d0 * e, d1])
    dfact = (d0 - 1) * d0

    for j in range(1, e + 1):
        a = inv(
            np.hstack((np.ones((d1, 1)), (p['nodes'][p['elems'][j - 1] - 1]))))
        idx = np.arange(d0 * (j - 1) + 1, d0 * j + 1)
        FFdata[np.array(idx - 1),
               0:d1] = a[np.arange(1, d1), :] / np.sqrt(dfact * np.abs(det(a)))

    CCdata = np.ones((d1 * e, 1))

    [F2data, F2iidx, F2jidx, C2data, C2iidx,
     C2jidx] = compl_elec_mdl(fwd_model, p, elec_imped)

    FF1_idx = np.vstack(
        (FFiidx.flatten('F'), FFjidx.flatten('F'))).astype('int') - 1
    CC1_idx = np.vstack(
        (np.arange(1, d1 * e + 1), p['elems'].flatten())).astype('int') - 1

    nn_elc = C2data.shape[0]

    FF_shape = [ffs + nn_elc for ffs in FF_shape]
    if (C2jidx.shape[0] > 0 and C2iidx.shape[0] > 0):
        CC_shape = [
            np.max(C2iidx).astype('int') + 1,
            np.max(C2jidx).astype('int') + 1
        ]

    F2_idx = np.vstack(
        (F2iidx.flatten('F'), F2jidx.flatten('F'))).astype('int')
    C2_idx = np.vstack(
        (C2iidx.flatten('F'), C2jidx.flatten('F'))).astype('int')

    FFdata = FFdata.astype(np.float32)
    CCdata = CCdata.astype(np.float32)
    F2data = F2data.astype(np.float32)
    C2data = C2data.astype(np.float32)

    FF1 = tf.SparseTensor(FF1_idx.T, FFdata.flatten('F'), dense_shape=FF_shape)
    CC1 = tf.SparseTensor(CC1_idx.T, CCdata.flatten('F'), dense_shape=CC_shape)

    FF2 = tf.SparseTensor(F2_idx.T, F2data.flatten('F'), dense_shape=FF_shape)
    CC2 = tf.SparseTensor(C2_idx.T, C2data.flatten('F'), dense_shape=CC_shape)
    FF = tf.sparse_add(FF1, FF2)
    CC = tf.sparse_add(CC1, CC2)

    FC = tf.sparse_matmul(tf.sparse_tensor_to_dense(FF,
                                                    validate_indices=False),
                          tf.sparse_tensor_to_dense(CC,
                                                    validate_indices=False),
                          a_is_sparse=True,
                          b_is_sparse=True)

    return FC, FF1, FF2, CC1, CC2
Example #13
def sparseNetworkSolver(data, given_learning_rate, n_epochs, batch_size):
    n_iter, nn, _ = np.shape(data)
    """
    * indices: generate a list to store the non-zero indices of the weights
    * values: generate a list to store the values at those indices
    * dense_shape: shape of the co-efficient matrix
    """
    indices = []
    indices.append([0, 0])
    indices.append([0, 1])
    for i in range(1, nn - 1):
        indices.append([i, i - 1])
        indices.append([i, i])
        indices.append([i, i + 1])
    indices.append([nn - 1, nn - 2])
    indices.append([nn - 1, nn - 1])

    nn_nonZero_entries = 2 + 3 * (nn - 2) + 2
    values = np.random.rand(nn_nonZero_entries)

    weights = tf.SparseTensor(indices=indices,
                              values=values,
                              dense_shape=[nn, nn])
    X = tf.placeholder(tf.float32, shape=(None, nn), name="batchX")
    y = tf.placeholder(tf.float32, shape=(None, nn), name="batchY")
    learning_rate = tf.placeholder(tf.float32, shape=(), name="learning_rate")

    return_weights = tf.sparse_tensor_to_dense(weights)
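
    # editor's note: `weights` above is a tf.SparseTensor, while tf.sparse_matmul
    # expects dense tensors with sparsity hints; the intended product would
    # normally be tf.sparse_tensor_dense_matmul(weights, X, adjoint_b=True)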

    output = tf.sparse_matmul(weights,
                              X,
                              transpose_a=False,
                              transpose_b=True,
                              a_is_sparse=True,
                              b_is_sparse=False,
                              name="multiply_Operation")
    mse = tf.reduce_mean(tf.square(y - tf.transpose(output)), name="mse")

    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)

    training_op = optimizer.minimize(mse)
    init = tf.global_variables_initializer()

    count = 0
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(n_epochs):
            if epoch >= 30000:
                given_learning_rate = 0.01
            x_batch, y_batch = fetch_batch(data, batch_size, epoch, n_iter)
            try:
                sess.run(training_op,
                         feed_dict={
                             X: x_batch,
                             y: y_batch,
                             learning_rate: given_learning_rate
                         })
            except Exception:
                print("Training operation failed")
            if epoch % 100 == 0:
                print(mse.eval(feed_dict={X: x_batch, y: y_batch}))
        sess.run(return_weights)
    return 0.0
x_data = xy[:, 0:3]
y_data = xy[:, 3:5]

# Make sure the shape and data are OK
print(x_data.shape, x_data)
print(y_data.shape, y_data)

# placeholders for a tensor that will be always fed.
X = tf.placeholder(tf.float32, shape=[None, 3])
Y = tf.placeholder(tf.float32, shape=[None, 2])

W = tf.Variable(tf.random_normal([3, 2]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')

# Hypothesis
hypothesis = tf.sparse_matmul(X, W) + b

# Simplified cost/loss function
cost = tf.reduce_mean(tf.square(hypothesis - Y))
#cost = tf.reduce_max(tf.square(hypothesis - Y))

# Minimize
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-5)
train = optimizer.minimize(cost)

# Launch the graph in a session.
sess = tf.Session()
# Initializes global variables in the graph.
sess.run(tf.global_variables_initializer())

for step in range(5001):
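    # editor's note: the loop body is cut off in the original snippet; a minimal
    # sketch of the usual step, assuming the cost/train ops and data defined above
    cost_val, _ = sess.run([cost, train], feed_dict={X: x_data, Y: y_data})
    if step % 500 == 0:
        print(step, cost_val)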
Example #15
# for i in range(pp['n_elec']):
#     fwd_model['electrode'][i].z_contact = np.random.random(1)[0]
#     zc[i] = fwd_model['electrode'][i].z_contact
#
# img['elem_data'] =np.random.random([pp['n_elem'], 1])

E_0 = sysmat.system_mat_1st_order(fwd_model, img)
E = sysmat.system_mat_1st_order_elec(fwd_model, img)

init = tf.initialize_all_tables()
sess = tf.Session()
sess.run(init)
dfc = sess.run(E)
FCc, FF1, FF2, CC1, CC2 = system_mat_fields(fwd_model, elec_imped=True)
fc1 = tf.sparse_matmul(tf.sparse_tensor_to_dense(FF1, validate_indices=False),
                       tf.sparse_tensor_to_dense(CC1, validate_indices=False),
                       a_is_sparse=True,
                       b_is_sparse=True)
fc2 = tf.sparse_matmul(tf.sparse_tensor_to_dense(FF2, validate_indices=False),
                       tf.sparse_tensor_to_dense(CC2, validate_indices=False),
                       a_is_sparse=True,
                       b_is_sparse=True)

fcc = tf.add(fc1, fc2)
#assert (np.max(np.abs(sess.run(E - E_0)))< 1e-4)
#assert(np.max(np.abs(sess.run(FCc - fcc))) < 1e-6)
#assert(np.max(np.abs(dfc- E_tr) < 1.0e-5)

# print
data = lm.loadmat('./data/model_sysmat')

# imdl = data['imdl']
st = tf.SparseTensor(indices=[[0, 0], [1, 2]],
                     values=[1.0, 2.0],
                     dense_shape=[30, 40])

mat = tf.placeholder(shape=(40, 30), dtype=tf.float32)

npmat = np.zeros((40, 30))
npmat[0][0] = 2.1
npmat[1][1] = 3.2
npmat[30][23] = 4.5
newmat = tf.matmul(tf.sparse_to_dense(st.indices, st.dense_shape, st.values),
                   mat)
newnewmat = tf.sparse_matmul(tf.sparse_to_dense(st.indices, st.dense_shape,
                                                st.values),
                             mat,
                             a_is_sparse=True,
                             b_is_sparse=True)

with tf.Session() as sess:
    t1 = time.time()
    mate = newmat.eval(feed_dict={mat: npmat})
    t2 = time.time()
    print(t2 - t1, mate)
    t3 = time.time()
    matee = newnewmat.eval(feed_dict={mat: npmat})  # time the sparse_matmul version
    t4 = time.time()
    print(t4 - t3, matee)

print("done")
Example #17
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

import tensorflow as tf
# `tx` (used below for tx.sparse_indices) comes from an external helper library
# that is not imported in this snippet

A = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
B = tf.constant([[5, 6], [7, 8], [9, 10]], dtype=tf.float32)
C = tf.SparseTensor([[0, 0], [0, 1], [1, 0], [1, 1], [2, 0], [2, 1]],
                    tf.constant([5, 6, 7, 8, 9, 10], tf.float32), [3, 2])

r1 = tf.matmul(A, B, transpose_b=True)

rs = tf.sparse_tensor_dense_matmul(C, A, adjoint_b=True)
rs = tf.transpose(rs)

D = tf.sparse_tensor_to_dense(C)
r2 = tf.sparse_matmul(A, B, transpose_b=True, b_is_sparse=True)

sess = tf.Session()

Cd = tf.sparse_tensor_to_dense(C)
r3 = tf.matmul(A, Cd, b_is_sparse=True, transpose_b=True)

#Ct = tf.sparse_transpose(C)
Ct = C
Ci = tx.sparse_indices(Ct)
r4 = tf.nn.embedding_lookup_sparse(tf.transpose(A),
                                   sp_ids=Ci,
                                   sp_weights=Ct,
                                   combiner="sum")

print(sess.run(r1))
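
# Editor's note: only r1 is evaluated above; a minimal continuation for the other
# variants. Note that r4 comes out as the transpose of the others, since the
# embedding_lookup_sparse construction computes C @ A^T rather than A @ C^T.
print(sess.run(rs))   # A @ B^T, via sparse_tensor_dense_matmul
print(sess.run(r2))   # A @ B^T, via sparse_matmul with b_is_sparse
print(sess.run(r3))   # A @ B^T, via matmul on the densified C
print(sess.run(r4))   # (A @ B^T)^T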
Example #18
    def __init__(self, config):
        author_layers = config.author_layers
        paper_layers = config.paper_layers
        author_layers_length = len(author_layers)
        paper_layers_length = len(paper_layers)
        author_input_size = author_layers[0]
        paper_input_size = paper_layers[0]
        l2_lambda = config.l2_lambda
        keep_prob = config.keep_prob
        gamma = config.gamma
        delta = config.delta
        alpha = config.alpha
        step_num = config.step_num
        hidden_num = config.hidden_num
        elem_num = config.elem_num
        pos_br = config.pos_br

        self.pos_h = tf.placeholder(tf.float32, [None, author_input_size], name = 'pos_h')
        self.pos_m = tf.placeholder(tf.float32, [None, paper_input_size], name = 'pos_m')
        self.pos_t = tf.placeholder(tf.float32, [None, author_input_size], name = 'pos_t')
        self.neg_h = tf.placeholder(tf.float32, [None, author_input_size], name = 'neg_h')
        self.neg_m = tf.placeholder(tf.float32, [None, paper_input_size], name = 'neg_m')
        self.neg_t = tf.placeholder(tf.float32, [None, author_input_size], name = 'neg_t')
        
        self.batch_size = tf.placeholder(tf.int32, [None], name = 'batch_size')
        self.pos_h_ids = tf.sparse_placeholder(tf.float32)
        self.pos_t_ids = tf.sparse_placeholder(tf.float32)
        self.neg_h_ids = tf.sparse_placeholder(tf.float32)
        self.neg_t_ids = tf.sparse_placeholder(tf.float32)

#***********************************************************************************************
        with tf.name_scope("ae"):
            cur_seed = random.getrandbits(32)
            encode = tf.get_variable(name = "encode", shape = [length, ae_d], initializer = tf.contrib.layers.xavier_initializer(uniform = False, seed=cur_seed))
            encode_b = tf.get_variable(name="encode_b", initializer = tf.zeros([ae_d]))
            decode = tf.get_variable(name = "decode", shape = [ae_d, length], initializer = tf.contrib.layers.xavier_initializer(uniform = False, seed=cur_seed))
            decode_b = tf.get_variable(name="decode_b", initializer = tf.zeros([length]))

            self.pos_h_ids1 = tf.sparse_tensor_to_dense(self.pos_h_ids, validate_indices = False)
            self.pos_t_ids1 = tf.sparse_tensor_to_dense(self.pos_t_ids, validate_indices = False)
            self.neg_h_ids1 = tf.sparse_tensor_to_dense(self.neg_h_ids, validate_indices = False)
            self.neg_t_ids1 = tf.sparse_tensor_to_dense(self.neg_t_ids, validate_indices = False)

            encode_pos_h  = tf.nn.relu(tf.sparse_matmul(self.pos_h_ids1, encode, a_is_sparse = True) + encode_b)
            decode_pos_h  = tf.nn.relu(tf.sparse_matmul(encode_pos_h, decode) + decode_b)
            encode_pos_t  = tf.nn.relu(tf.sparse_matmul(self.pos_t_ids1, encode, a_is_sparse = True) + encode_b)
            decode_pos_t  = tf.nn.relu(tf.sparse_matmul(encode_pos_t, decode) + decode_b)
            encode_neg_h  = tf.nn.relu(tf.sparse_matmul(self.neg_h_ids1, encode, a_is_sparse = True) + encode_b)
            decode_neg_h  = tf.nn.relu(tf.sparse_matmul(encode_neg_h, decode) + decode_b)
            encode_neg_t  = tf.nn.relu(tf.sparse_matmul(self.neg_t_ids1, encode, a_is_sparse = True) + encode_b)
            decode_neg_t  = tf.nn.relu(tf.sparse_matmul(encode_neg_t, decode) + decode_b)

            self.ae_loss = 0.0

            self.ae_loss_init = tf.reduce_sum(abs(tf.multiply(self.pos_h_ids1 - decode_pos_h,  tf.multiply(pos_br, tf.sign(self.pos_h_ids1)))))
            self.ae_loss = self.ae_loss_init + tf.reduce_sum(abs(tf.multiply(self.pos_t_ids1 - decode_pos_t,  tf.multiply(pos_br, tf.sign(self.pos_t_ids1)))))
            self.ae_loss += tf.reduce_sum(abs(tf.multiply(self.neg_h_ids1 - decode_neg_h,  tf.multiply(pos_br, tf.sign(self.neg_h_ids1)))))
            self.ae_loss += tf.reduce_sum(abs(tf.multiply(self.neg_t_ids1 - decode_neg_t,  tf.multiply(pos_br, tf.sign(self.neg_t_ids1)))))
        with tf.name_scope("mlp"):
            self.relation_W_a = []
            self.relation_b_a = []
            self.relation_W_p = []
            self.relation_b_p = []
            self.pos_a1_hidden = []
            self.pos_a2_hidden = []
            self.pos_p_hidden = []
            self.pos_r_hidden_test = []
            self.neg_a1_hidden = []
            self.neg_a2_hidden = []
            self.neg_p_hidden = []
            self.mlp_l2_loss = 0.0

            # author mlp
            for i in range(author_layers_length - 1):
                cur_seed = random.getrandbits(32)
                self.relation_W_a.append(tf.get_variable(name = "relation_W_a"+str(i), shape = [author_layers[i], author_layers[i+1]], initializer = tf.contrib.layers.xavier_initializer(uniform = False, seed=cur_seed)))
                self.relation_b_a.append(tf.get_variable(name="relation_b_a"+str(i), initializer = tf.zeros([author_layers[i+1]])))
                self.mlp_l2_loss += tf.nn.l2_loss(self.relation_W_a[i])+tf.nn.l2_loss(self.relation_b_a[i])
                # feed pos_h into mlp
                if i == 0:
                    layers_pos_a1  = tf.nn.relu(tf.matmul(self.pos_h, self.relation_W_a[i]) + self.relation_b_a[i])
                    layers_neg_a1  = tf.nn.relu(tf.matmul(self.neg_h, self.relation_W_a[i]) + self.relation_b_a[i])
                elif i == author_layers_length - 2:
                    layers_pos_a1 = tf.matmul(self.pos_a1_hidden[i-1], self.relation_W_a[i]) + self.relation_b_a[i]
                    layers_neg_a1 = tf.matmul(self.neg_a1_hidden[i-1], self.relation_W_a[i]) + self.relation_b_a[i]
                else:
                    layers_pos_a1 = tf.nn.relu(tf.matmul(self.pos_a1_hidden[i-1], self.relation_W_a[i]) + self.relation_b_a[i])
                    layers_neg_a1 = tf.nn.relu(tf.matmul(self.neg_a1_hidden[i-1], self.relation_W_a[i]) + self.relation_b_a[i])
                if i == (author_layers_length-3)/2:
                    cur_seed = random.getrandbits(32)
                    self.pos_a1_rep = tf.nn.dropout(layers_pos_a1, keep_prob, seed=cur_seed)
                    cur_seed = random.getrandbits(32)
                    self.neg_a1_rep = tf.nn.dropout(layers_neg_a1, keep_prob, seed=cur_seed)
                    self.pos_a1_hidden.append(self.pos_a1_rep)
                    self.neg_a1_hidden.append(self.neg_a1_rep)
                else:
                    self.pos_a1_hidden.append(layers_pos_a1)
                    self.neg_a1_hidden.append(layers_neg_a1)                  
            for i in range(author_layers_length - 1):
                cur_seed = random.getrandbits(32)
                
                # feed pos_h into mlp
                if i == 0:
                    layers_pos_a2  = tf.nn.relu(tf.matmul(self.pos_t, self.relation_W_a[i]) + self.relation_b_a[i])
                    layers_neg_a2  = tf.nn.relu(tf.matmul(self.neg_t, self.relation_W_a[i])+ self.relation_b_a[i])
                elif i == author_layers_length - 2:
                    layers_pos_a2 = tf.matmul(self.pos_a2_hidden[i-1], self.relation_W_a[i]) + self.relation_b_a[i]
                    layers_neg_a2 = tf.matmul(self.neg_a2_hidden[i-1], self.relation_W_a[i]) + self.relation_b_a[i]
                else:
                    layers_pos_a2 = tf.nn.relu(tf.matmul(self.pos_a2_hidden[i-1], self.relation_W_a[i])+self.relation_b_a[i])
                    layers_neg_a2 = tf.nn.relu(tf.matmul(self.neg_a2_hidden[i-1], self.relation_W_a[i])+self.relation_b_a[i])
                if i == (author_layers_length-3)/2:
                    cur_seed = random.getrandbits(32)
                    self.pos_a2_rep = tf.nn.dropout(layers_pos_a2, keep_prob, seed=cur_seed)
                    cur_seed = random.getrandbits(32)
                    self.neg_a2_rep = tf.nn.dropout(layers_neg_a2, keep_prob, seed=cur_seed)
                    self.pos_a2_hidden.append(self.pos_a2_rep)
                    self.neg_a2_hidden.append(self.neg_a2_rep)
                else:
                    self.pos_a2_hidden.append(layers_pos_a2)
                    self.neg_a2_hidden.append(layers_neg_a2)
            for i in range(paper_layers_length - 1):
                cur_seed = random.getrandbits(32)
                self.relation_W_p.append(tf.get_variable(name = "relation_W_p"+str(i), shape = [paper_layers[i], paper_layers[i+1]], initializer = tf.contrib.layers.xavier_initializer(uniform = False, seed=cur_seed)))
                self.relation_b_p.append(tf.get_variable(name="relation_b_p"+str(i), initializer = tf.zeros([paper_layers[i+1]])))
                self.mlp_l2_loss += tf.nn.l2_loss(self.relation_W_p[i])+tf.nn.l2_loss(self.relation_b_p[i])      
                # feed pos_m into mlp
                if i == 0:
                    layers_pos_p  = tf.nn.relu(tf.matmul(self.pos_m, self.relation_W_p[i]) + self.relation_b_p[i])
                    layers_neg_p  = tf.nn.relu(tf.matmul(self.neg_m, self.relation_W_p[i]) + self.relation_b_p[i])
                elif i == paper_layers_length - 2:
                    layers_pos_p = tf.matmul(self.pos_p_hidden[i-1], self.relation_W_p[i])+ self.relation_b_p[i]
                    layers_neg_p = tf.matmul(self.neg_p_hidden[i-1], self.relation_W_p[i]) + self.relation_b_p[i]
                else:
                    layers_pos_p = tf.nn.relu(tf.matmul(self.pos_p_hidden[i-1], self.relation_W_p[i]) + self.relation_b_p[i])
                    layers_neg_p = tf.nn.relu(tf.matmul(self.neg_p_hidden[i-1], self.relation_W_p[i]) + self.relation_b_p[i])
  
                if i == (paper_layers_length-3)/2:
                    cur_seed = random.getrandbits(32)
                    self.pos_p_rep = tf.nn.dropout(layers_pos_p, keep_prob, seed=cur_seed)
                    cur_seed = random.getrandbits(32)
                    self.neg_p_rep = tf.nn.dropout(layers_neg_p, keep_prob, seed=cur_seed)
                    self.pos_p_hidden.append(self.pos_p_rep)
                    self.neg_p_hidden.append(self.neg_p_rep)
                else:
                    self.pos_p_hidden.append(layers_pos_p)
                    self.neg_p_hidden.append(layers_neg_p)
            self.a1_embed = self.pos_a1_hidden[-1]
            self.p_embed = self.pos_p_hidden[-1]
            self.a2_embed = self.pos_a2_hidden[-1]
        with tf.name_scope('concate'):
            self.encode_pos_h = encode_pos_h
            con_pos_h = tf.concat([self.pos_a1_hidden[-1], encode_pos_h], 1)
            con_pos_t = tf.concat([self.pos_a2_hidden[-1], encode_pos_t], 1)
            con_neg_h = tf.concat([self.neg_a1_hidden[-1], encode_neg_h], 1)
            con_neg_t = tf.concat([self.neg_a2_hidden[-1], encode_neg_t], 1)
            self.con_pos_h = con_pos_h

            '''
        with tf.name_scope("node_lookup"):
            cur_seed = random.getrandbits(32)
            embeddings = tf.get_variable(name = "embeddings", shape = [entityTotal, author_layers[-1]], initializer = tf.contrib.layers.xavier_initializer(uniform = False, seed=cur_seed))
            pos_h_e = tf.nn.embedding_lookup(embeddings, self.pos_h_ids)
            pos_m_e = tf.nn.embedding_lookup(embeddings, self.pos_m_ids)
            pos_t_e = tf.nn.embedding_lookup(embeddings, self.pos_t_ids)
            neg_h_e = tf.nn.embedding_lookup(embeddings, self.neg_h_ids)
            neg_m_e = tf.nn.embedding_lookup(embeddings, self.neg_m_ids)
            neg_t_e = tf.nn.embedding_lookup(embeddings, self.neg_t_ids)
        with tf.name_scope("max"):
            new_pos_h = tf.maximum(pos_h_e, self.pos_a1_hidden[-1])
            new_pos_m = tf.maximum(pos_m_e, self.pos_p_hidden[-1])
            new_pos_t = tf.maximum(pos_t_e, self.pos_a2_hidden[-1])
            new_neg_h = tf.maximum(neg_h_e, self.neg_a1_hidden[-1])
            new_neg_m = tf.maximum(neg_m_e, self.neg_p_hidden[-1])
            new_neg_t = tf.maximum(neg_t_e, self.neg_a2_hidden[-1])

            self.update_pos_h = tf.scatter_update(embeddings, self.pos_h_ids, new_pos_h)
            self.update_pos_m = tf.scatter_update(embeddings, self.pos_m_ids, new_pos_m)
            self.update_pos_t = tf.scatter_update(embeddings, self.pos_t_ids, new_pos_t)
            self.update_neg_h = tf.scatter_update(embeddings, self.neg_h_ids, new_neg_h)
            self.update_neg_m = tf.scatter_update(embeddings, self.neg_m_ids, new_neg_m)
            self.update_neg_t = tf.scatter_update(embeddings, self.neg_t_ids, new_neg_t)
            self.update = [self.update_pos_h, self.update_pos_m, self.update_pos_t,
            self.update_neg_h, self.update_neg_m, self.update_neg_t]
            '''
        with tf.variable_scope("ape", reuse = None) as vs:
        #    vs.reuse_variables()

            inputs_pos = tf.concat([tf.reduce_sum(tf.multiply(con_pos_h, self.pos_p_hidden[-1]),axis = 1, keep_dims = True), tf.reduce_sum(tf.multiply(con_pos_h, con_pos_t), axis = 1, keep_dims = True), tf.reduce_sum(tf.multiply(self.pos_p_hidden[-1], con_pos_t),axis = 1,  keep_dims = True)], 1)
            inputs_neg = tf.concat([tf.reduce_sum(tf.multiply(con_neg_h, self.neg_p_hidden[-1]),axis = 1, keep_dims = True), tf.reduce_sum(tf.multiply(con_neg_h, con_neg_t), axis = 1, keep_dims = True), tf.reduce_sum(tf.multiply(self.neg_p_hidden[-1], con_neg_t),axis = 1,  keep_dims = True)], 1)
            self.inputs_pos = inputs_pos
            self.inputs_neg = inputs_neg

            if config.no_weight:
                merge_layers = tf.contrib.keras.layers.Dense(1, kernel_initializer = tf.ones_initializer(), trainable = False, name = 'merge_pos')
                merge_pos = merge_layers(inputs_pos)
                merge_pos = merge_layers(inputs_neg)
                b_pos = tf.get_variable(name="b_pos", initializer = tf.zeros(1))
                merge_pos_new = merge_pos + b_pos
                
                merge_neg = tf.contrib.keras.layers.Dense(1, kernel_initializer = tf.ones_initializer(), trainable = False, name = 'merge_pos')(inputs_pos)
                b_neg = tf.get_variable(name="b_pos", initializer = tf.zeros(1))
                merge_neg_new = merge_neg + b_neg
            else:
                print('inputs_pos', inputs_pos)
                merge_layers = tf.contrib.keras.layers.Dense(1, kernel_initializer = tf.ones_initializer(),kernel_constraint = 'NonNeg', trainable = True, name = 'merge_pos')
                self.merge_pos_new = merge_layers(inputs_pos)
                self.merge_neg_new = merge_layers(inputs_neg)
                print(self.merge_pos_new)
            outlier_loss_pos = tf.reduce_sum(tf.log(tf.clip_by_value(tf.sigmoid(tf.multiply(self.merge_pos_new, tf.ones_like(self.merge_pos_new))), 1e-8, 1.0 )))
            outlier_loss = outlier_loss_pos + tf.reduce_sum(tf.log(tf.clip_by_value(tf.sigmoid(tf.multiply(self.merge_neg_new, tf.multiply(-1.0, tf.ones_like(self.merge_neg_new)))), 1e-8, 1.0)))
            self.outlier_loss = tf.multiply(-1.0, outlier_loss)
        #    pos_h_test = tf.nn.embedding_lookup(embeddings, self.pos_h_ids)
        #    pos_m_test = tf.nn.embedding_lookup(embeddings, self.pos_m_ids)
        #    pos_t_test = tf.nn.embedding_lookup(embeddings, self.pos_t_ids)
            inputs_test = tf.concat([tf.reduce_sum(tf.multiply(con_pos_h, self.pos_p_hidden[-1]),axis = 1, keep_dims = True), tf.reduce_sum(tf.multiply(con_pos_h, con_pos_t), axis = 1, keep_dims = True), tf.reduce_sum(tf.multiply(self.pos_p_hidden[-1], con_pos_t),axis = 1,  keep_dims = True)], 1)
            self.outlier_score = merge_layers(inputs_test)
            self.loss = self.outlier_loss + alpha * self.ae_loss + l2_lambda * self.mlp_l2_loss