def __init__(self, dimensions, lr=1e-3):
    D = dimensions
    self.params = tf.Variable(tf.random_normal(shape=[D, 1], mean=0.5, stddev=0.5), name="params")
    self.x = tf.placeholder(tf.float32, shape=(None, D), name="x")
    self.y = tf.placeholder(tf.float32, shape=(None, 1), name="y")
    linearComb = tf.matmul(self.x, self.params)
    loss = self.y - linearComb
    loss = tf.reduce_sum(loss * loss)  # sum of squared residuals

    # gradient = tf.gradients(loss, self.params)
    self.trainOp = tf.train.GradientDescentOptimizer(lr).minimize(loss)
    self.predictOp = linearComb

    init = tf.global_variables_initializer()
    self.session = tf.InteractiveSession()
    self.session.run(init)
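
A minimal training sketch for this class (the class name LinearRegression, the synthetic data, and the loop length are assumptions, not part of the original):

import numpy as np

model = LinearRegression(dimensions=5)  # hypothetical name for the class above
X = np.random.randn(100, 5).astype(np.float32)
Y = X.dot(np.ones((5, 1), dtype=np.float32))  # targets from a known linear map
for _ in range(200):  # plain gradient-descent steps
    model.session.run(model.trainOp, feed_dict={model.x: X, model.y: Y})
preds = model.session.run(model.predictOp, feed_dict={model.x: X})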
Example #2
    def __init__(self, **kwargs):
        self.kwargs = kwargs
        # tf.placeholder takes a dtype first; the shape comes from kwargs
        self.x = tf.placeholder(tf.float32, shape=kwargs.get("imgshape", [11, 11, 1, 1]))
        self.pool = kwargs.get("pool", False)
        self.create_network(kwargs)  # abuse of kwargs: the whole dict is forwarded

		self.games = 0
		self.wins = 0
		self._update_score()
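
create_network and _update_score are not shown in this excerpt; assuming they exist, construction would look something like this (Agent is a hypothetical name for the class):

agent = Agent(imgshape=[None, 84, 84, 4], pool=True)  # hypothetical class and shape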
Example #3
    def initModel(self):
        super(NGCF, self).initModel()

        regularizer = tf.contrib.layers.l2_regularizer(scale=0.001)

        self.weights = dict()

        initializer = tf.contrib.layers.xavier_initializer()
        self.weight_size = [
            self.embed_size * 4, self.embed_size * 2, self.embed_size
        ]
        self.weight_size_list = [self.embed_size] + self.weight_size
        self.n_layers = 3

        for k in range(self.n_layers):
            self.weights['W_gc_%d' % k] = tf.Variable(initializer(
                [self.weight_size_list[k], self.weight_size_list[k + 1]]),
                                                      name='W_gc_%d' % k)
            self.weights['b_gc_%d' % k] = tf.Variable(initializer(
                [1, self.weight_size_list[k + 1]]),
                                                      name='b_gc_%d' % k)

            self.weights['W_bi_%d' % k] = tf.Variable(initializer(
                [self.weight_size_list[k], self.weight_size_list[k + 1]]),
                                                      name='W_bi_%d' % k)
            self.weights['b_bi_%d' % k] = tf.Variable(initializer(
                [1, self.weight_size_list[k + 1]]),
                                                      name='b_bi_%d' % k)

            self.weights['W_mlp_%d' % k] = tf.Variable(initializer(
                [self.weight_size_list[k], self.weight_size_list[k + 1]]),
                                                       name='W_mlp_%d' % k)
            self.weights['b_mlp_%d' % k] = tf.Variable(initializer(
                [1, self.weight_size_list[k + 1]]),
                                                       name='b_mlp_%d' % k)

        self.neighbors_u = tf.placeholder(tf.int32, [None, self.num_items])
        self.neighbors_i = tf.placeholder(tf.int32, [None, self.num_users])

        # ego_embeddings (the stacked user/item embedding table) is assumed to
        # be built earlier, e.g. in the parent class's initModel
        all_embeddings = [ego_embeddings]

        for k in range(0, self.n_layers):

            # sum messages of neighbors; temp_embed is assumed to hold the
            # (adjacency @ ego_embeddings) products computed earlier
            side_embeddings = tf.concat(temp_embed, 0)
            # transformed sum messages of neighbors.
            sum_embeddings = tf.nn.leaky_relu(
                tf.matmul(side_embeddings, self.weights['W_gc_%d' % k]) +
                self.weights['b_gc_%d' % k])

            # bi messages of neighbors.
            bi_embeddings = tf.multiply(ego_embeddings, side_embeddings)
            # transformed bi messages of neighbors.
            bi_embeddings = tf.nn.leaky_relu(
                tf.matmul(bi_embeddings, self.weights['W_bi_%d' % k]) +
                self.weights['b_bi_%d' % k])

            # combine the two message types (activations were applied above).
            ego_embeddings = sum_embeddings + bi_embeddings

            # message dropout.
            ego_embeddings = tf.nn.dropout(ego_embeddings,
                                           1 - self.prob_dropout[k])

            # normalize the distribution of embeddings.
            norm_embeddings = tf.math.l2_normalize(ego_embeddings, axis=1)

            all_embeddings += [norm_embeddings]

        all_embeddings = tf.concat(all_embeddings, 1)
        u_g_embeddings, i_g_embeddings = tf.split(
            all_embeddings, [self.num_users, self.num_items], 0)
        return u_g_embeddings, i_g_embeddings
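
The snippet uses ego_embeddings and temp_embed without defining them. In reference NGCF implementations they come from the base embedding tables and a normalized adjacency matrix; a sketch of that setup, where norm_adj_hat and the embedding-table names are assumptions:

# assumed: base tables self.user_embeddings / self.item_embeddings and a
# normalized adjacency SparseTensor norm_adj_hat of shape
# [num_users + num_items, num_users + num_items]
ego_embeddings = tf.concat([self.user_embeddings, self.item_embeddings], axis=0)
temp_embed = [tf.sparse_tensor_dense_matmul(norm_adj_hat, ego_embeddings)]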
Example #4
# extract the downloaded archive
with zipfile.ZipFile("local_zip_file", 'r') as zip_ref:
    zip_ref.extractall("targetdir")

# Start with a gray image with a little noise
img_noise = np.random.uniform(size=(224, 224, 3)) + 100.0

model_fn = 'tensorflow_inception_graph.pb'

# step 2 - create the TensorFlow session and load the model
graph = tf.Graph()
sess = tf.InteractiveSession(graph=graph)
with tf.gfile.FastGFile(os.path.join(data_dir, model_fn), 'rb') as f:  # data_dir is assumed defined earlier
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
t_input = tf.placeholder(np.float32, name='input')  # define input tensor
imagenet_mean = 117.0
t_preprocessed = tf.expand_dims(t_input - imagenet_mean, 0)
tf.import_graph_def(graph_def, {'input': t_preprocessed})

layers = [op.name for op in graph.get_operations() if op.type == 'Conv2D' and 'import/' in op.name]
feature_nums = [int(graph.get_tensor_by_name(name + ':0').get_shape()[-1]) for name in layers]

print('Number of layers', len(layers))
print('Total number of feature channels:', sum(feature_nums))

# helper functions for tf graph visualization
# pylint: disable=unused-variable
def strip_consts(graph_def, max_const_size=32):
    """Strip large constant values from graph_def."""
    strip_def = tf.GraphDef()
    # body continues as in the TensorFlow DeepDream tutorial this is based on
    for n0 in graph_def.node:
        n = strip_def.node.add()
        n.MergeFrom(n0)
        if n.op == 'Const':
            tensor = n.attr['value'].tensor
            size = len(tensor.tensor_content)
            if size > max_const_size:
                tensor.tensor_content = ('<stripped %d bytes>' % size).encode()
    return strip_def
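
img_noise above is never consumed in the excerpt; in the DeepDream tutorial this code follows, it seeds a naive gradient-ascent renderer roughly like this (a sketch; render_naive is not part of the excerpt):

def render_naive(t_obj, img0=img_noise, iter_n=20, step=1.0):
    # maximize the mean activation of t_obj by ascending the input gradient
    t_score = tf.reduce_mean(t_obj)
    t_grad = tf.gradients(t_score, t_input)[0]
    img = img0.copy()
    for _ in range(iter_n):
        g, _ = sess.run([t_grad, t_score], {t_input: img})
        g /= g.std() + 1e-8  # normalize the gradient step
        img += g * step
    return img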
Example #5
import tensorflow as tf

aux = tf.placeholder(tf.float32)
a = tf.Variable(3.0, dtype=tf.float32)

sess = tf.Session()
sess.run(tf.global_variables_initializer())  # variables must be initialized before reading
print(sess.run(a))
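
aux is declared but never fed; a placeholder only yields a value when one is supplied via feed_dict, e.g.:

print(sess.run(aux * 2.0, feed_dict={aux: 1.5}))  # prints 3.0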
Example #6
    def initModel(self):
        super(NGCF, self).initModel()


        self.u_neighbors_matrix = tf.placeholder(tf.int32, [None, self.num_items], name="u_n_idx")
        self.i_neighbors_matrix = tf.placeholder(tf.int32, [None, self.num_users], name="i_n_idx")
        self.j_idx = tf.placeholder(tf.int32, [None], name="j_idx")
        # degree counts, as float32 column vectors so the divisions below broadcast
        self.p_u = tf.placeholder(tf.float32, [None, 1], name="p_u")
        self.p_i = tf.placeholder(tf.float32, [None, 1], name="p_i")
        self.p_j = tf.placeholder(tf.float32, [None, 1], name="p_j")

        decay_u = np.zeros(self.num_users)

        # sqrt assumes "from math import sqrt" (np.sqrt would also work)
        for user in self.data.user:
            uid = self.data.user[user]
            decay_u[uid] = sqrt(len(self.data.trainSet_u[user]))
        # column vector in float32 so it broadcasts over the embedding dimension
        decay_u = tf.convert_to_tensor(decay_u[:, None], dtype=tf.float32)
        decay_i = np.zeros(self.num_items)

        for item in self.data.item:
            iid = self.data.item[item]
            decay_i[iid] = sqrt(len(self.data.trainSet_i[item]))
        decay_i = tf.convert_to_tensor(decay_i[:, None], dtype=tf.float32)
        self.variables = dict()

        initializer = tf.contrib.layers.xavier_initializer()
        weight_size = [self.embed_size*4,self.embed_size*2,self.embed_size]
        weight_size_list = [self.embed_size] + weight_size

        self.n_layers = 3

        #initialize parameters
        for k in range(self.n_layers):
            self.variables['W_%d_1' % k] = tf.Variable(
                initializer([weight_size_list[k], weight_size_list[k + 1]]), name='W_%d_1' % k)
            self.variables['W_%d_2' % k] = tf.Variable(
                initializer([weight_size_list[k], weight_size_list[k + 1]]), name='W_%d_2' % k)

        # per-layer embedding tables and lookups; the propagation loop below
        # reads these entries, so they are created for every layer here
        # (self.u_idx / self.v_idx are assumed to come from the parent initModel)
        for k in range(-1, self.n_layers):
            self.variables['user_embeddings_%d' % k] = tf.Variable(
                tf.truncated_normal(shape=[self.num_users, self.embed_size], stddev=0.005),
                name='user_embeddings_%d' % k)
            self.variables['item_embeddings_%d' % k] = tf.Variable(
                tf.truncated_normal(shape=[self.num_items, self.embed_size], stddev=0.005),
                name='item_embeddings_%d' % k)
            self.variables['u_embedding_%d' % k] = tf.nn.embedding_lookup(
                self.variables['user_embeddings_%d' % k], self.u_idx)
            self.variables['v_embedding_%d' % k] = tf.nn.embedding_lookup(
                self.variables['item_embeddings_%d' % k], self.v_idx)
            self.variables['j_embedding_%d' % k] = tf.nn.embedding_lookup(
                self.variables['item_embeddings_%d' % k], self.j_idx)

        # float32 so they can be used directly in the matmuls below
        self.neighbors_u = tf.placeholder(tf.float32, [None, self.num_items])
        self.neighbors_v = tf.placeholder(tf.float32, [None, self.num_users])
        self.neighbors_j = tf.placeholder(tf.float32, [None, self.num_users])

        all_embeddings = []  # layer outputs; the excerpt never appends to it
        for k in range(0,self.n_layers):

            # aggregate messages of items.
            sum_item_messages = tf.matmul(self.neighbors_u,self.variables['item_embeddings_%d' %(k-1)]/decay_i)
            W_1_e_i = tf.matmul(self.variables['W_%d_1' % k],sum_item_messages,transpose_b=True)
            sum_item_messages = tf.multiply(self.variables['u_embedding_%d' %(k-1)]/self.p_u,sum_item_messages)
            sum_item_messages = tf.matmul(self.variables['W_%d_2' % k],sum_item_messages,transpose_b=True)
            sum_item_messages += W_1_e_i
            e_u = tf.nn.leaky_relu(tf.matmul(self.variables['W_%d_1' % k],self.variables['u_embedding_%d' %(k-1)],
                                             transpose_b=True)+sum_item_messages)

            # aggregate messages of positive item.
            sum_user_messages = tf.matmul(self.neighbors_v, self.variables['user_embeddings_%d' %(k-1)] / decay_u)
            W_1_e_u = tf.matmul(self.variables['W_%d_1' % k], sum_user_messages, transpose_b=True)
            sum_user_messages = tf.multiply(self.variables['v_embedding_%d' %(k-1)] / self.p_i, sum_user_messages)
            sum_user_messages = tf.matmul(self.variables['W_%d_2' % k], sum_user_messages, transpose_b=True)
            sum_user_messages += W_1_e_u
            e_i = tf.nn.leaky_relu(tf.matmul(self.variables['W_%d_1' % k], self.variables['v_embedding_%d' %(k-1)],
                                             transpose_b=True) + sum_user_messages)

            # aggregate messages of negative item.
            sum_user_messages = tf.matmul(self.neighbors_j, self.variables['user_embeddings_%d' % (k-1)] / decay_u)
            W_1_e_u = tf.matmul(self.variables['W_%d_1' % k], sum_user_messages, transpose_b=True)
            sum_user_messages = tf.multiply(self.variables['j_embedding_%d' %(k-1)] / self.p_j, sum_user_messages)
            sum_user_messages = tf.matmul(self.variables['W_%d_2' % k], sum_user_messages, transpose_b=True)
            sum_user_messages += W_1_e_u
            e_j = tf.nn.leaky_relu(tf.matmul(self.variables['W_%d_1' % k], self.variables['j_embedding_%d' %(k-1)],
                                             transpose_b=True) + sum_user_messages)
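
The excerpt stops after computing e_u, e_i, and e_j. Models of this shape usually feed them into a BPR pairwise ranking loss; a sketch of what that step could look like (not the author's code; note that e_u, e_i, e_j are [embed_dim, batch] here because of the transposed matmuls):

pos_scores = tf.reduce_sum(tf.multiply(e_u, e_i), axis=0)  # per-example positive scores
neg_scores = tf.reduce_sum(tf.multiply(e_u, e_j), axis=0)  # per-example negative scores
loss = -tf.reduce_sum(tf.log(tf.sigmoid(pos_scores - neg_scores)))  # BPR objective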