def forward(self):
    """Run one forward sweep of message passing through the factor graph.

    Updates, in order: the latent-position marginal, the pairwise product
    marginal, the heterogeneity marginal, the concatenated per-edge vector,
    the linear predictor, and finally the noisy linear predictor.

    Side effects: overwrites entries of ``self.nodes`` and reads/updates the
    cached messages stored on ``self.factors``.  Relies on ``self.nodes["vector"]``
    and ``self.nodes["linear_predictor"]`` holding values from the previous
    sweep (they appear on the right-hand side of their own updates).
    """
    # latent position: prior message times the product factor's upward message
    self.nodes["latent"] = self.factors["latent_prior"].to_x() * self.factors["product"].message_to_x
    # pairwise products of latent positions, combined with the message coming
    # back down from the concatenation factor
    self.nodes["product"] = self.factors["product"].to_product(
        x=self.nodes["latent"]
    ) * self.factors["concatenate"].message_to_x["s_uv"]
    # heterogeneity: combine the row-wise (a_u) and column-wise (a_v) messages
    # reduced over the appropriate axis, then multiply by the prior message
    to_alpha = \
        self.factors["concatenate"].message_to_x["a_u"].product(0) * \
        self.factors["concatenate"].message_to_x["a_v"].product(1)
    self.nodes["heterogeneity"] = self.factors["heterogeneity_prior"].message_to_x * to_alpha
    # concatenate: broadcast the heterogeneity marginal to an N x N grid
    # (axis 0 for a_u, axis 1 for a_v) and bundle it with the product marginal
    alpha = self.nodes["heterogeneity"]
    x = {
        "a_u": GaussianArray(
            tf.tile(tf.expand_dims(alpha.precision(), 0), [self.N, 1, 1]),
            tf.tile(tf.expand_dims(alpha.mean_times_precision(), 0), [self.N, 1, 1])
        ),
        "a_v": GaussianArray(
            tf.tile(tf.expand_dims(alpha.precision(), 1), [1, self.N, 1]),
            tf.tile(tf.expand_dims(alpha.mean_times_precision(), 1), [1, self.N, 1])
        ),
        "s_uv": self.nodes["product"]
    }
    # NOTE: uses the previous sweep's "vector" and "linear_predictor" marginals
    self.nodes["vector"] = self.factors["concatenate"].to_v(x) * \
        self.factors["sum"].to_x(self.nodes["vector"], self.nodes["linear_predictor"])
    # linear predictor: sum over the vector components times the noise factor's
    # message to the mean
    self.nodes["linear_predictor"] = \
        self.factors["sum"].to_sum(self.nodes["vector"]) * \
        self.factors["noise"].message_to_mean
    # noisy linear predictor: noise factor output combined with the message
    # coming back from the adjacency (observation) factor
    self.nodes["noisy_linear_predictor"] = self.factors["noise"].to_x(
        mean=self.nodes["linear_predictor"],
        variance=self.parameters["noise"]._value
    ) * self.factors["adjacency"].message_to_x
def to_parent(self):
    """Send the upward message to the parent node.

    The cavity distribution (child marginal with the old downward message
    divided out) has its last axis split into two slots: slot 0 is reduced
    over rows (axis 0) and slot 1 over columns (axis 1), and the two reduced
    natural-parameter arrays are added to form the new message.
    """
    cavity = self.child / self.message_to_child
    prec, mtp = cavity.natural()
    # slot 0 summed over axis 0, slot 1 summed over axis 1
    prec_new = tf.reduce_sum(prec[:, :, 0:1], 0) + tf.reduce_sum(prec[:, :, 1:2], 1)
    mtp_new = tf.reduce_sum(mtp[:, :, 0:1], 0) + tf.reduce_sum(mtp[:, :, 1:2], 1)
    new_message = GaussianArray(prec_new, mtp_new)
    # replace the stored message inside the parent marginal, then cache it
    self.parent.update(self.message_to_parent, new_message)
    self.message_to_parent = new_message
def to_child(self):
    """Send the downward message to the child node.

    The cavity distribution (parent marginal with the old upward message
    divided out) is broadcast twice — along a new axis 0 and a new axis 1 —
    and the two copies are concatenated on the last axis, mirroring the
    two-slot layout that ``to_parent`` reduces over.
    """
    cavity = self.parent / self.message_to_parent
    prec, mtp = cavity.natural()

    def _broadcast(t, axis):
        # replicate the parent's parameters N times along the new axis
        multiples = [self.N, 1, 1] if axis == 0 else [1, self.N, 1]
        return tf.tile(tf.expand_dims(t, axis), multiples)

    new_message = GaussianArray(
        tf.concat([_broadcast(prec, 0), _broadcast(prec, 1)], 2),
        tf.concat([_broadcast(mtp, 0), _broadcast(mtp, 1)], 2),
    )
    # replace the stored message inside the child marginal, then cache it
    self.child.update(self.message_to_child, new_message)
    self.message_to_child = new_message
def to_parent(self):
    """Send the observation-likelihood message back to the Gaussian parent.

    Uses moment matching with ``sigmoid_integrals`` (orders 0 and 1) to turn
    the sigmoid likelihood into a Gaussian message in natural parameters.
    """
    # NOTE(review): the cavity division (parent / message_to_parent) is
    # deliberately commented out in the original; the full marginal is used.
    marginal = self.parent
    mean, var = marginal.mean_and_variance()
    ints = sigmoid_integrals(mean, var, [0, 1])
    std = tf.math.sqrt(var)
    # first moment of the tilted distribution
    tilted_mean = mean * ints[0] + std * ints[1]
    prec = (tilted_mean - mean * ints[0]) / var
    mtp = mean * prec + self.child.proba() - ints[0]
    # missing observations are stored as a uniform 0.5 probability; give them
    # an essentially uninformative message instead of NaNs
    uniform = self.child.is_uniform()
    prec = tf.where(uniform, 1.0e-10, prec)
    mtp = tf.where(uniform, 0., mtp)
    new_message = GaussianArray(prec, mtp)
    # replace the stored message inside the parent marginal, then cache it
    self.parent.update(self.message_to_parent, new_message)
    self.message_to_parent = new_message
def to_x(self, product, x):
    """Compute the message from the product factor back to its inputs.

    Args:
        product: Gaussian marginal over the pairwise products (the old
            message to the product is divided out to form the cavity).
        x: Gaussian marginal over the inputs; the old message to x is
            divided out, leaving the message from the *other* factors.

    Returns:
        GaussianArray: the combined message to x (product of the message
        to x0, reduced over axis 1, and the message to x1, reduced over
        axis 0), also cached in ``self.message_to_x``.
    """
    product = product / self.message_to_product
    x = x / self.message_to_x
    # here x contains the message from the other term in the product
    m, v = x.mean_and_variance()
    # BUG FIX: the second moment of a Gaussian is Var + mean^2, not
    # log-variance + mean^2 (consistent with the incremental to_parent,
    # which uses v + m ** 2).
    second_moment = v + m ** 2
    # message to x0 using x as the message from x1
    p = product.precision() * tf.expand_dims(second_moment, 1)
    mtp = product.mean_times_precision() * tf.expand_dims(m, 1)
    p0 = tf.math.reduce_sum(p, 1)
    mtp0 = tf.math.reduce_sum(mtp, 1)
    # message to x1 using x as the message from x0
    p = product.precision() * tf.expand_dims(second_moment, 0)
    mtp = product.mean_times_precision() * tf.expand_dims(m, 0)
    p1 = tf.math.reduce_sum(p, 0)
    mtp1 = tf.math.reduce_sum(mtp, 0)
    # product of messages
    self.message_to_x = GaussianArray(p0 + p1, mtp0 + mtp1)
    return self.message_to_x
def to_parent(self): product = self.child / self.message_to_child # ---------------------------------------- # update 0 # get marginal m1, v1 = self.parent.mean_and_variance() m1 = tf.expand_dims(m1, 1) v1 = tf.expand_dims(v1, 1) # compute new message p0 = product.precision() * (v1 + m1 ** 2) mtp0 = product.mean_times_precision() * m1 # store new messages p0prev, mtp0prev = self.message_to_parent0.natural() self.message_to_parent0 = GaussianArray(p0, mtp0) # accumulate p0sum = tf.math.reduce_sum(p0, 1) mtp0sum = tf.math.reduce_sum(mtp0, 1) psum = tf.math.reduce_sum(p0prev, 1) mtpsum = tf.math.reduce_sum(mtp0prev, 1) # update self.parent.update(GaussianArray(psum, mtpsum), GaussianArray(p0sum, mtp0sum)) # ---------------------------------------- # update 1 # get marginal m0, v0 = self.parent.mean_and_variance() m0 = tf.expand_dims(m0, 0) v0 = tf.expand_dims(v0, 0) # compute new message p1 = product.precision() * (v0 + m0 ** 2) mtp1 = product.mean_times_precision() * m0 # store new messages p1prev, mtp1prev = self.message_to_parent1.natural() self.message_to_parent1 = GaussianArray(p1, mtp1) # accumulate p1sum = tf.math.reduce_sum(p1, 0) mtp1sum = tf.math.reduce_sum(mtp1, 0) psum = tf.math.reduce_sum(p1prev, 0) mtpsum = tf.math.reduce_sum(mtp1prev, 0) # update self.parent.update(GaussianArray(psum, mtpsum), GaussianArray(p1sum, mtp1sum))