def encoder_item(self, embs_cate, w_list, b_list):
        """Variationally encode the interaction input into K aspect-specific
        latents, soft-assigning items to the K categories.

        Assignment logits are the (temperature-scaled) cosine similarity
        between inferred context embeddings and the category embeddings;
        each aspect k then gets its own reparameterized latent from the
        q-network applied to the aspect-masked input.

        Args:
            embs_cate: dense tensor of category embeddings, one row per
                category (K rows) — assumed; confirm against caller.
            w_list, b_list: weights/biases forwarded to the inference
                networks (qInfer_network / q_network).

        Returns:
            Tuple (z_list, cate_list, kl): per-aspect latent samples,
            per-aspect assignment row vectors, and the KL terms summed
            over all K aspects.
        """
        # Cosine similarity between context and category embeddings,
        # sharpened by the temperature self.tau.
        embs_cate = tf.nn.l2_normalize(embs_cate, axis=1)
        embs_cnxt = tf.nn.l2_normalize(self.qInfer_network(w_list, b_list),
                                       axis=1)
        p_assign = tf.matmul(embs_cnxt, embs_cate, transpose_b=True) / self.tau

        if not self.gumbel:
            # Deterministic soft assignment.
            cates = tf.nn.softmax(p_assign, axis=1)
        else:
            # Gumbel-softmax: sample during training, use the softmax mode
            # at evaluation time (is_training_ph acts as a 0/1 gate).
            cates_dist = RelaxedOneHotCategorical(1, p_assign)
            cates_sample = cates_dist.sample()
            cates_mode = tf.nn.softmax(p_assign, axis=1)
            cates = (self.is_training_ph * cates_sample +
                     (1 - self.is_training_ph) * cates_mode)

        # VAE based encoding
        z_list, cate_list = [], []
        kl = None
        for k in range(self.K):
            # Assignment weights for aspect k, reshaped to a row vector so
            # it broadcasts across the input's columns below.
            cates_k = tf.reshape(cates[:, k], (1, -1))
            cate_list.append(cates_k)

            # q-network: mask the input to aspect k, then reparameterize.
            x_k = self.input_ph * cates_k
            mu_k, std_k, kl_k = self.q_network(x_k, w_list, b_list)
            eps = tf.random_normal(tf.shape(std_k), dtype=tf.float64)
            # Reparameterization trick; noise is gated off at eval time.
            z_k = mu_k + self.is_training_ph * eps * std_k
            z_list.append(z_k)
            # Accumulate KL over the K aspects.
            kl = (kl_k if (kl is None) else (kl + kl_k))

        return z_list, cate_list, kl
# Example #2
    def _build_actor_network(self,
                             obs,
                             obs_space,
                             act_space,
                             hiddens,
                             activation=None,
                             scope=None):
        """Build an MLP actor head ending in a Gumbel-softmax sampler.

        Args:
            obs: observation tensor fed to the network.
            obs_space, act_space: spaces passed through to the catalog
                model; act_space.shape[0] sets the logit dimension.
            hiddens: iterable of hidden-layer widths.
            activation: activation for the hidden layers (None = linear).
            scope: variable scope name (reused via AUTO_REUSE).

        Returns:
            Tuple (sampler, feature, model, variables): a relaxed one-hot
            action sample, the raw logits, the optional preprocessor
            model, and all variables created under the scope.
        """
        with tf.variable_scope(scope, reuse=tf.AUTO_REUSE) as scope:
            # Optionally run observations through the catalog's
            # state-preprocessor model first.
            if self.config["use_state_preprocessor"]:
                model = ModelCatalog.get_model(
                    {
                        "obs": obs,
                        "is_training": self._get_is_training_placeholder(),
                    }, obs_space, act_space, 1, self.config["model"])
                net = model.last_layer
            else:
                model = None
                net = obs

            # Hidden stack followed by a linear logit layer.
            for layer_size in hiddens:
                net = tf.layers.dense(net,
                                      units=layer_size,
                                      activation=activation)
            feature = tf.layers.dense(net,
                                      units=act_space.shape[0],
                                      activation=None)
            # Differentiable categorical sample at temperature 1.0.
            sampler = RelaxedOneHotCategorical(
                temperature=1.0, logits=feature).sample()

        return sampler, feature, model, tf.global_variables(scope.name)
    def encoder_attr(self, embs_cate, w_list, b_list):
        """Variationally encode the attribute (knowledge-graph) view into
        per-aspect latents for both the user side and the item side.

        Mirrors encoder_item: attributes are soft-assigned to K aspects;
        for each aspect a q-network yields a reparameterized latent — once
        on the dense user projection, once on the sparse kg matrix.

        Args:
            embs_cate: dense tensor of category embeddings (K rows) —
                assumed; confirm against caller.
            w_list, b_list: parameters forwarded to the inference networks.

        Returns:
            Tuple (z_list, zItem_list, cateItem_list, kl): user-side
            latents, item-side latents, normalized per-aspect mass of the
            sparse matrix, and the KL terms summed over all K aspects.
        """
        # Temperature-scaled cosine similarity -> assignment logits.
        embs_cate = tf.nn.l2_normalize(embs_cate, axis=1)
        embs_cnxt = tf.nn.l2_normalize(self.qInfer_network_sparse(
            w_list, b_list),
                                       axis=1)
        p_assign = tf.matmul(embs_cnxt, embs_cate, transpose_b=True) / self.tau

        if not self.gumbel:
            # Deterministic soft assignment.
            cates = tf.nn.softmax(p_assign, axis=1)
        else:
            # Gumbel-softmax sample at train time, softmax mode at eval
            # (is_training_ph is a 0/1 gate).
            cates_dist = RelaxedOneHotCategorical(1, p_assign)
            cates_sample = cates_dist.sample()
            cates_mode = tf.nn.softmax(p_assign, axis=1)
            cates = (self.is_training_ph * cates_sample +
                     (1 - self.is_training_ph) * cates_mode)

        # VAE based encoding
        z_list = []
        zItem_list, cateItem_list = [], []
        kl = None
        # Project the interaction input into attribute space: with both
        # operands adjointed this computes (kg_mat^T @ input_ph^T), then
        # the transpose restores batch-major orientation.
        x_input2attr = tf.sparse_tensor_dense_matmul(self.kg_mat,
                                                     self.input_ph,
                                                     adjoint_a=True,
                                                     adjoint_b=True)
        x_input2attr = tf.transpose(x_input2attr)
        for k in range(self.K):
            # Aspect-k assignment weights as a row vector (broadcasts
            # across columns below).
            cates_k = tf.reshape(cates[:, k], (1, -1))

            # q-network for user aspects
            x_k = x_input2attr * cates_k
            mu_k, std_k, kl_k = self.q_network(x_k, w_list, b_list)
            eps = tf.random_normal(tf.shape(std_k), dtype=tf.float64)
            # Reparameterization trick; noise gated off at eval time.
            z_k = mu_k + self.is_training_ph * eps * std_k
            z_list.append(z_k)
            kl = (kl_k if (kl is None) else (kl + kl_k))

            # q-network for item aspects
            # Sparse-dense elementwise product via SparseTensor.__mul__:
            # scales kg_mat entries by the aspect-k weights.
            x_k = self.kg_mat.__mul__(cates_k)
            mu_k, std_k, kl_k = self.q_network_sparse(x_k, w_list, b_list)
            eps = tf.random_normal(tf.shape(std_k), dtype=tf.float64)
            z_k = mu_k + self.is_training_ph * eps * std_k
            zItem_list.append(z_k)
            # Normalized per-row mass of the masked sparse matrix — used
            # as the item-to-aspect-k assignment distribution.
            cates_sum_k = tf.sparse_reduce_sum(x_k, axis=1)
            cates_sum_k = tf.reshape(cates_sum_k, (1, -1))
            cateItem_list.append(cates_sum_k / tf.reduce_sum(cates_sum_k))

        return z_list, zItem_list, cateItem_list, kl
# Example #4
    # rot_weights_logits = tf.layers.dense(inputs = 1/im_input_diff_mean_mat, units = 4, use_bias = False, activation = None)
    # pdb.set_trace()

# rot_weights_logits = 1/im_input_diff_mean_mat
    
    # rot_weights_logits = tf.layers.dense(inputs = flow_param_input, units = 4, use_bias = True, activation = None)

# # im_both = tf.concat([helper.tf_resize_image(im_input, resize_ratios=[1/16,1/16]), helper.tf_resize_image(im_target, resize_ratios=[1/16,1/16])], axis=-1)
# # with tf.variable_scope("eft_third", reuse=False):
# #     lay1_image = tf.layers.conv2d(inputs=im_both, filters=n_filters, kernel_size=[5, 5], strides=[2, 2], padding="valid", use_bias=True, activation=nonlinearity)
# #     lay2_image = tf.layers.conv2d(inputs=lay1_image, filters=2*n_filters, kernel_size=[5, 5], strides=[1, 1], padding="valid", use_bias=True, activation=nonlinearity)
# #     lay3_image = tf.layers.conv2d(inputs=lay2_image, filters=4*n_filters, kernel_size=[4, 4], strides=[1, 1], padding="valid", use_bias=True, activation=nonlinearity)
# # flow_param_input_2 = tf.reshape(lay3_image, [-1, lay3_image.get_shape()[1].value*lay3_image.get_shape()[2].value*4*n_filters])

# Soft rotation attention: sample 4 relaxed one-hot weights over the
# rotation logits. NOTE(review): `temperature`, `rot_weights_logits` and
# `im_input` come from outside this fragment — confirm in the full file.
rot_weights_temperature = temperature 
dist = RelaxedOneHotCategorical(rot_weights_temperature, logits=rot_weights_logits)
rot_weights = dist.sample()

# Stack the input and its three 90-degree rotations on a new trailing
# axis, then blend them with the sampled weights (weighted sum over the
# rotation axis). Assumes im_input is rank-4 (NHWC) — TODO confirm.
im_input_90 = tf.image.rot90(im_input, k=1)
im_input_180 = tf.image.rot90(im_input, k=2)
im_input_270 = tf.image.rot90(im_input, k=3)
im_input_all_rotations = tf.concat([im_input[:,:,:,:,np.newaxis], im_input_90[:,:,:,:,np.newaxis], im_input_180[:,:,:,:,np.newaxis], im_input_270[:,:,:,:,np.newaxis]], axis=-1)
im_input_attended = tf.reduce_sum(rot_weights[:, np.newaxis, np.newaxis, np.newaxis, :]*im_input_all_rotations, axis=-1) 

# init = tf.initialize_all_variables()
# sess = tf.InteractiveSession()  
# sess.run(init)
# fd = {im_input: im_input_np, im_target: im_target_np, temperature:0.2}
# rot_weights_np, im_input_attended_np = sess.run([rot_weights, im_input_attended], feed_dict=fd)
# rot_weights_np, rot_weights_logits_np = sess.run([rot_weights, rot_weights_logits], feed_dict=fd)
# np.concatenate([rot_weights_np[0, :, np.newaxis],rot_weights_logits_np[0, :, np.newaxis], scipy.special.softmax(rot_weights_logits_np[0, :, np.newaxis])], axis=1)
# Example #5
def gumbelLogDensity(inputs, logits, temp):
    """Return the log-density of `inputs` under a Gumbel distribution.

    NOTE(review): `Gumbel` is constructed with temperature/logits kwargs,
    which matches a relaxed-categorical-style wrapper rather than the
    plain tfp Gumbel(loc, scale) — confirm which class is imported.
    """
    return Gumbel(temperature=temp, logits=logits).log_prob(inputs)
# Example #6
             activity_regularizer=None,
             kernel_constraint=None,
             bias_constraint=None,
             trainable=True,
             name=None,
             **kwargs):

        self.units = units
        self.activation = activation

        super(LearnableDropoutDense, self).__init__(name=name, **kwargs)
        self.kernel = self.add_variable(
            'kernel',
            shape=[input_shape[-1].value, self.units],
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            dtype=self.dtype,
            trainable=True)

    def call(self, inputs):
        """Forward pass: multiply `inputs` by `self.output_kernel`.

        NOTE(review): this method looks truncated — the matmul result is
        never returned, and `output_kernel` is not assigned anywhere in
        the visible portion of the class (build() creates `kernel`).
        Confirm against the full source before relying on it.
        """
        inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
        outputs = standard_ops.matmul(inputs, self.output_kernel)


# Demo: at a near-zero temperature the relaxed categorical collapses to
# (almost) one-hot draws distributed according to `p`.
p = [0.1, 0.8, 0.1]
temperature = 1e-5
dist = RelaxedOneHotCategorical(temperature=temperature, probs=p)
r = dist.sample(1000)
print(r)