Example #1
    def get_probabilities(self, utility, lambda_k):
        """
            This method calculates the probability of choosing an object from the query set using the following model parameters:

                * **weights** (:math:`w`): Weights to get the utility of the object :math:`Y_i = U(x_i) = w \\cdot x_i`
                * **lambda_k** (:math:`\\lambda_k`): Lambda is the measure of independence amongst the objects in the nest :math:`B_k`

            The probability of choosing the object  :math:`x_i` from the query set :math:`Q`:

            .. math::
                    P_i = \\sum_{j \\in I \\setminus i} P_{{i} \\lvert {ij}} P_{ij} \\enspace where, \\\\
                    P_{i \\lvert ij} = \\frac{\\boldsymbol{e}^{^{Y_i} /_{\\lambda_{ij}}}}{\\boldsymbol{e}^{^{Y_i} /_{\\lambda_{ij}}} + \\boldsymbol{e}^{^{Y_j} /_{\\lambda_{ij}}}} \\enspace ,\\\\
                    P_{ij} = \\frac{{\\left( \\boldsymbol{e}^{^{Y_i}/{\\lambda_{ij}}} + \\boldsymbol{e}^{^{Y_j}/{\\lambda_{ij}}}  \\right)}^{\\lambda_{ij}}}{\\sum_{k=1}^{n-1} \\sum_{\\ell = k + 1}^{n} {\\left( \\boldsymbol{e}^{^{Y_k}/{\\lambda_{k\\ell}}} + \\boldsymbol{e}^{^{Y_{\\ell}}/{\\lambda_{k\\ell}}}  \\right)}^{\\lambda_{k\\ell}}}


            Parameters
            ----------
            utility : theano tensor
                (n_instances, n_objects)
                Utility :math:`Y_i` of the objects :math:`x_i \\in Q` in the query sets
            lambda_k : theano tensor (range : [alpha, 1.0])
                (n_nests)
                Measure of independence amongst the objects in each nest

            Returns
            -------
            p : theano tensor
                (n_instances, n_objects)
                Choice probabilities :math:`P_i` of the objects :math:`x_i \\in Q` in the query sets

        """
        n_objects = self.n_objects
        nests_indices = self.nests_indices
        n_nests = self.n_nests
        lambdas = tt.ones((n_objects, n_objects), dtype=float)
        for i, p in enumerate(nests_indices):
            r = [p[0], p[1]]
            c = [p[1], p[0]]
            lambdas = tt.set_subtensor(lambdas[r, c], lambda_k[i])
        uti_per_nest = tt.transpose(utility[:, None, :] / lambdas, (0, 2, 1))
        ind = np.array([[[i1, i2], [i2, i1]] for i1, i2 in nests_indices])
        ind = ind.reshape(2 * n_nests, 2)
        x = uti_per_nest[:, ind[:, 0], ind[:, 1]].reshape((-1, 2))
        log_sum_exp_nest = ttu.logsumexp(x).reshape((-1, n_nests))
        pnk = tt.exp(log_sum_exp_nest * lambda_k -
                     ttu.logsumexp(log_sum_exp_nest * lambda_k))
        p = tt.zeros(tuple(utility.shape), dtype=float)
        for i in range(n_nests):
            i1, i2 = nests_indices[i]
            x1 = tt.exp(uti_per_nest[:, i1, i2] -
                        log_sum_exp_nest[:, i]) * pnk[:, i]
            x2 = tt.exp(uti_per_nest[:, i2, i1] -
                        log_sum_exp_nest[:, i]) * pnk[:, i]
            p = tt.set_subtensor(p[:, i1], p[:, i1] + x1)
            p = tt.set_subtensor(p[:, i2], p[:, i2] + x2)
        return p
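
The docstring above gives the paired combinatorial logit (PCL) probability in closed form. The following is a minimal plain-NumPy sketch of that formula for a single query set, useful as a cross-check of the Theano implementation; the helper name pcl_probabilities and the example utilities and per-pair lambdas are invented for illustration and are not part of the library.

import itertools
import numpy as np

def pcl_probabilities(y, lam):
    # y: (n_objects,) utilities Y_i; lam: dict mapping each unordered pair (k, l) to lambda_kl
    n = len(y)
    pairs = list(itertools.combinations(range(n), 2))
    # P_kl is proportional to (e^{Y_k/lambda_kl} + e^{Y_l/lambda_kl})^{lambda_kl}
    nest_terms = np.array([
        (np.exp(y[k] / lam[(k, l)]) + np.exp(y[l] / lam[(k, l)])) ** lam[(k, l)]
        for k, l in pairs
    ])
    p_nest = nest_terms / nest_terms.sum()
    p = np.zeros(n)
    for (k, l), pn in zip(pairs, p_nest):
        denom = np.exp(y[k] / lam[(k, l)]) + np.exp(y[l] / lam[(k, l)])
        p[k] += np.exp(y[k] / lam[(k, l)]) / denom * pn  # P_{k|kl} * P_kl
        p[l] += np.exp(y[l] / lam[(k, l)]) / denom * pn  # P_{l|kl} * P_kl
    return p

y = np.array([1.0, 0.5, -0.2])
lam = {(0, 1): 0.7, (0, 2): 0.9, (1, 2): 0.5}
print(pcl_probabilities(y, lam), pcl_probabilities(y, lam).sum())  # probabilities sum to 1
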
Example #2
    def get_probabilities(self, utility, lambda_k, utility_k):
        """
            This method calculates the probability of choosing an object from the query set using the following model parameters:

                * **weights** (:math:`w`): Weights to get the utility of the object :math:`Y_i = U(x_i) = w \\cdot x_i`
                * **weights_k** (:math:`w_k`): Weights to get the utility of the nest :math:`W_k = U_k(x) = w_k \\cdot c_k`, where :math:`c_k` is the center of the object space of nest :math:`B_k`
                * **lambda_k** (:math:`\\lambda_k`): Lambda is the measure of independence amongst the objects in the nest :math:`B_k`

            The probability of choosing the object  :math:`x_i` from the query set :math:`Q`:

            .. math::
                P_i = \\frac{\\boldsymbol{e}^{ ^{Y_i} /_{\\lambda_k}}}{\\sum_{j \\in B_k} \\boldsymbol{e}^{^{Y_j} /_{\\lambda_k}}} \\frac {\\boldsymbol{e}^{W_k + \\lambda_k I_k}} {\\sum_{\\ell = 1}^{K} \\boldsymbol{e}^{ W_{\\ell } + \\lambda_{\\ell} I_{\\ell}}} \\quad i \\in B_k  \\enspace , \\\\
                where,\\enspace I_k = \\ln \\sum_{ j \\in B_k} \\boldsymbol{e}^{^{Y_j} /_{\\lambda_k}}


            Parameters
            ----------
            utility : theano tensor
                (n_instances, n_objects)
                Utility :math:`Y_i` of the objects :math:`x_i \\in Q` in the query sets
            lambda_k : theano tensor (range : [alpha, 1.0])
                (n_nests)
                Measure of independence amongst the objects in each nest
            utility_k : theano tensor
                (n_instances, n_nests)
                Utilities of the nests :math:`B_k \\in \\mathcal{B}`

            Returns
            -------
            p : theano tensor
                (n_instances, n_objects)
                Choice probabilities :math:`P_i` of the objects :math:`x_i \\in Q` in the query sets

        """
        n_instances, n_objects = self.y_nests_.shape
        pni_k = tt.zeros((n_instances, n_objects))
        ivm = tt.zeros((n_instances, self.n_nests))
        for i in range(self.n_nests):
            rows, cols = tt.neq(self.y_nests_, i).nonzero()
            sub_tensor = tt.set_subtensor(utility[rows, cols], -1e50)
            ink = ttu.logsumexp(sub_tensor)
            rows, cols = tt.eq(self.y_nests_, i).nonzero()
            pni_k = tt.set_subtensor(pni_k[rows, cols],
                                     tt.exp(sub_tensor - ink)[rows, cols])
            ivm = tt.set_subtensor(ivm[:, i],
                                   lambda_k[i] * ink[:, 0] + utility_k[i])
        pk = tt.exp(ivm - ttu.logsumexp(ivm))
        pn_k = tt.zeros((n_instances, n_objects))
        for i in range(self.n_nests):
            rows, cols = tt.eq(self.y_nests_, i).nonzero()
            p = tt.ones((n_instances, n_objects)) * pk[:, i][:, None]
            pn_k = tt.set_subtensor(pn_k[rows, cols], p[rows, cols])
        p = pni_k * pn_k
        return p
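
The nested logit probability in the docstring factorises into a within-nest choice and a choice between nests via the inclusive values :math:`I_k`. The sketch below re-derives it in plain NumPy for one query set under that reading; the helper name nested_logit_probabilities and all example values (nest assignment, nest utilities, lambdas) are made up for illustration and are not taken from the library.

import numpy as np

def nested_logit_probabilities(y, nests, w_nest, lam):
    # y: (n_objects,) utilities Y_i; nests: (n_objects,) nest index of each object
    # w_nest: (n_nests,) nest utilities W_k; lam: (n_nests,) lambdas
    n_nests = len(w_nest)
    # inclusive value I_k = log sum_{j in B_k} exp(Y_j / lambda_k)
    iv = np.array([np.log(np.exp(y[nests == k] / lam[k]).sum()) for k in range(n_nests)])
    # P(B_k) is proportional to exp(W_k + lambda_k * I_k)
    p_nest = np.exp(w_nest + lam * iv)
    p_nest /= p_nest.sum()
    p = np.empty_like(y)
    for k in range(n_nests):
        members = nests == k
        within = np.exp(y[members] / lam[k])
        # P_i = P(i | B_k) * P(B_k) for every object i in nest B_k
        p[members] = within / within.sum() * p_nest[k]
    return p

y = np.array([1.0, 0.2, -0.5, 0.8])
nests = np.array([0, 0, 1, 1])      # objects 0, 1 in nest 0; objects 2, 3 in nest 1
w_nest = np.array([0.3, -0.1])
lam = np.array([0.6, 0.9])
print(nested_logit_probabilities(y, nests, w_nest, lam).sum())  # 1.0
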
Example #3
    def get_probabilities(self, utility, lambda_k, alpha_ik):
        """
            This method calculates the probability of choosing an object from the query set using the following model parameters:

                * **weights** (:math:`w`): Weights to get the utility of the object :math:`Y_i = U(x_i) = w \\cdot x_i`
                * **weights_k** (:math:`w_k`): Weights to get the fractional allocation of each object :math:`x_i` in :math:`Q` to each nest :math:`B_k` as :math:`\\alpha_{ik} = w_k \\cdot x_i`.
                * **lambda_k** (:math:`\\lambda_k`): Lambda for nest :math:`B_k`, capturing the correlations between the objects.

            The probability of choosing the object :math:`x_i` from the query set :math:`Q`:

            .. math::
                P_i = \\sum_{\\substack{B_k \\in \\mathcal{B} \\\\ i \\in B_k}} P_{i \\lvert {B_k}} P_{B_k} \\enspace where, \\\\
                P_{B_k} = \\frac{{\\left(\\sum_{j \\in B_k} {\\left(\\alpha_{jk} \\boldsymbol{e}^{Y_j} \\right)}^{^{1}/{\\lambda_k}} \\right)}^{\\lambda_k}}{\\sum_{\\ell = 1}^{K} {\\left( \\sum_{j \\in B_{\\ell}} {\\left( \\alpha_{j\\ell} \\boldsymbol{e}^{Y_j} \\right)}^{^{1}/{\\lambda_\\ell}} \\right)^{\\lambda_{\\ell}}}} \\\\
                P_{{i} \\lvert {B_k}} = \\frac{{\\left(\\alpha_{ik} \\boldsymbol{e}^{Y_i} \\right)}^{^{1}/{\\lambda_k}}}{\\sum_{j \\in B_k} {\\left(\\alpha_{jk} \\boldsymbol{e}^{Y_j} \\right)}^{^{1}/{\\lambda_k}}} \\enspace ,


            Parameters
            ----------
            utility : theano tensor
                (n_instances, n_objects)
                Utility :math:`Y_i` of the objects :math:`x_i \\in Q` in the query sets
            lambda_k : theano tensor (range : [alpha, 1.0])
                (n_nests)
                Measure of independence amongst the objects in each nest
            alpha_ik : theano tensor
                (n_instances, n_objects, n_nests)
                Fractional allocation of each object :math:`x_i` in each nest :math:`B_k`

            Returns
            -------
            p : theano tensor
                (n_instances, n_objects)
                Choice probabilities :math:`P_i` of the objects :math:`x_i \\in Q` in the query sets

        """
        n_nests = self.n_nests
        n_instances, n_objects = utility.shape
        pik = tt.zeros((n_instances, n_objects, n_nests))
        sum_per_nest = tt.zeros((n_instances, n_nests))
        for i in range(n_nests):
            uti = (utility + tt.log(alpha_ik[:, :, i])) / lambda_k[i]
            sum_n = ttu.logsumexp(uti)
            pik = tt.set_subtensor(pik[:, :, i], tt.exp(uti - sum_n))
            sum_per_nest = tt.set_subtensor(
                sum_per_nest[:, i], sum_n[:, 0] * lambda_k[i]
            )
        pnk = tt.exp(sum_per_nest - ttu.logsumexp(sum_per_nest))
        pnk = pnk[:, None, :]
        p = pik * pnk
        p = p.sum(axis=2)
        return p
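
The generalised nested logit (GNL) formula in the docstring lets every object belong to every nest with fractional allocation :math:`\\alpha_{ik}`. Below is a minimal plain-NumPy sketch that mirrors the log-space computation of the Theano method above; the helper name gnl_probabilities and the example utilities, allocations and lambdas are invented for illustration and are not part of the library.

import numpy as np

def gnl_probabilities(y, alpha, lam):
    # y: (n_objects,) utilities; alpha: (n_objects, n_nests) fractional allocations
    # (rows sum to 1); lam: (n_nests,) nest lambdas
    # (alpha_ik * e^{Y_i})^{1/lambda_k}, kept in log space for numerical stability
    log_terms = (y[:, None] + np.log(alpha)) / lam[None, :]           # (n_objects, n_nests)
    log_nest_sums = np.log(np.exp(log_terms).sum(axis=0))             # log of the sum over j, per nest
    p_i_given_k = np.exp(log_terms - log_nest_sums[None, :])          # P(i | B_k)
    nest_scores = lam * log_nest_sums                                 # lambda_k * log sum
    p_nest = np.exp(nest_scores - np.log(np.exp(nest_scores).sum()))  # P(B_k)
    return (p_i_given_k * p_nest[None, :]).sum(axis=1)                # sum_k P(i | B_k) P(B_k)

y = np.array([1.0, 0.0, -0.5])
alpha = np.array([[0.7, 0.3], [0.4, 0.6], [0.2, 0.8]])   # each object split across 2 nests
lam = np.array([0.6, 0.9])
p = gnl_probabilities(y, alpha, lam)
print(p, p.sum())  # probabilities sum to 1
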
Example #4
 def get_probabilities(self, utility, lambda_k, alpha_ik):
     n_nests = self.n_nests
     n_instances, n_objects = utility.shape
     pik = tt.zeros((n_instances, n_objects, n_nests))
     sum_per_nest = tt.zeros((n_instances, n_nests))
     for i in range(n_nests):
         uti = (utility + tt.log(alpha_ik[:, :, i])) / lambda_k[i]
         sum_n = ttu.logsumexp(uti)
         pik = tt.set_subtensor(pik[:, :, i], tt.exp(uti - sum_n))
         sum_per_nest = tt.set_subtensor(sum_per_nest[:, i],
                                         sum_n[:, 0] * lambda_k[i])
     pnk = tt.exp(sum_per_nest - ttu.logsumexp(sum_per_nest))
     pnk = pnk[:, None, :]
     p = pik * pnk
     p = p.sum(axis=2)
     return p
Example #5
 def get_probability(self, utility, lambda_k, utility_k):
     n_instances, n_objects = self.y_nests.shape
     pni_k = tt.zeros((n_instances, n_objects))
     ivm = tt.zeros((n_instances, self.n_nests))
     for i in range(self.n_nests):
         rows, cols = tt.neq(self.y_nests, i).nonzero()
         sub_tensor = tt.set_subtensor(utility[rows, cols], -1e50)
         ink = ttu.logsumexp(sub_tensor)
         rows, cols = tt.eq(self.y_nests, i).nonzero()
         pni_k = tt.set_subtensor(pni_k[rows, cols],
                                  tt.exp(sub_tensor - ink)[rows, cols])
         ivm = tt.set_subtensor(ivm[:, i],
                                lambda_k[i] * ink[:, 0] + utility_k[i])
     pk = tt.exp(ivm - ttu.logsumexp(ivm))
     pn_k = tt.zeros((n_instances, n_objects))
     for i in range(self.n_nests):
         rows, cols = tt.eq(self.y_nests, i).nonzero()
         p = tt.ones((n_instances, n_objects)) * pk[:, i][:, None]
         pn_k = tt.set_subtensor(pn_k[rows, cols], p[rows, cols])
     p = pni_k * pn_k
     return p
 def get_probabilities(self, utility, lambda_k):
     n_objects = self.n_objects
     nests_indices = self.nests_indices
     n_nests = self.n_nests
     lambdas = tt.ones((n_objects, n_objects), dtype=float)
     for i, p in enumerate(nests_indices):
         r = [p[0], p[1]]
         c = [p[1], p[0]]
         lambdas = tt.set_subtensor(lambdas[r, c], lambda_k[i])
     uti_per_nest = tt.transpose(utility[:, None, :] / lambdas, (0, 2, 1))
     ind = np.array([[[i1, i2], [i2, i1]] for i1, i2 in nests_indices])
     ind = ind.reshape(2 * n_nests, 2)
     x = uti_per_nest[:, ind[:, 0], ind[:, 1]].reshape((-1, 2))
     log_sum_exp_nest = ttu.logsumexp(x).reshape((-1, n_nests))
     pnk = tt.exp(log_sum_exp_nest * lambda_k - ttu.logsumexp(log_sum_exp_nest * lambda_k))
     p = tt.zeros(tuple(utility.shape), dtype=float)
     for i in range(n_nests):
         i1, i2 = nests_indices[i]
         x1 = tt.exp(uti_per_nest[:, i1, i2] - log_sum_exp_nest[:, i]) * pnk[:, i]
         x2 = tt.exp(uti_per_nest[:, i2, i1] - log_sum_exp_nest[:, i]) * pnk[:, i]
         p = tt.set_subtensor(p[:, i1], p[:, i1] + x1)
         p = tt.set_subtensor(p[:, i2], p[:, i2] + x2)
     return p