Example #1
def pnorm_canon(expr, args):
    x = args[0]
    p = expr.p
    axis = expr.axis
    shape = expr.shape
    t = Variable(shape)

    if p == 2:
        if axis is None:
            assert shape == tuple()
            return t, [SOC(t, vec(x))]
        else:
            return t, [SOC(vec(t), x, axis)]

    # we need an absolute value constraint for the symmetric convex branches
    # (p > 1)
    constraints = []
    if p > 1:
        # TODO(akshayka): Express this more naturally (recursively), in terms
        # of the other atoms
        abs_expr = abs(x)
        abs_x, abs_constraints = abs_canon(abs_expr, abs_expr.args)
        x = abs_x
        constraints += abs_constraints

    # now, we take care of the remaining convex and concave branches
    # to create the rational powers, we need a new variable, r, and
    # the constraint sum(r) == t
    r = Variable(x.shape)
    constraints += [sum(r) == t]

    # todo: no need to run gm_constr to form the tree each time.
    # we only need to form the tree once
    promoted_t = Constant(np.ones(x.shape)) * t
    p = Fraction(p)
    if p < 0:
        constraints += gm_constrs(promoted_t, [x, r],
                                  (-p / (1 - p), 1 / (1 - p)))
    if 0 < p < 1:
        constraints += gm_constrs(r, [x, promoted_t], (p, 1 - p))
    if p > 1:
        constraints += gm_constrs(x, [r, promoted_t], (1 / p, 1 - 1 / p))

    return t, constraints
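
The canonicalization above only constructs the epigraph constraints. As a small usage sketch (the data A, b and the variables x, t here are illustrative choices, assuming only cvxpy's public API), the same epigraph idea can be written at the user level; compiling either problem applies a p-norm canonicalization like the one shown here.

import numpy as np
import cvxpy as cp

np.random.seed(0)
A = np.random.randn(5, 3)
b = np.random.randn(5)

x = cp.Variable(3)
t = cp.Variable()

# Epigraph form: minimize t subject to ||A x - b||_3 <= t.  The direct
# form below has the same optimal value; the conic constraints the
# solver actually receives come from a p-norm canonicalization.
epigraph = cp.Problem(cp.Minimize(t), [cp.norm(A @ x - b, 3) <= t])
direct = cp.Problem(cp.Minimize(cp.norm(A @ x - b, 3)))
epigraph.solve()
direct.solve()
print(epigraph.value, direct.value)  # the two optimal values agree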
Example #2
def geo_mean_canon(expr, args):
    x = args[0]
    w = expr.w
    shape = expr.shape
    t = Variable(shape)

    x_list = [x[i] for i in range(len(w))]

    # todo: catch cases where we have (0, 0, 1)?
    # todo: what about curvature case (should be affine) in trivial
    #       case of (0, 0, 1)?
    # should this behavior match with what we do in power?
    return t, gm_constrs(t, x_list, w)
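
A small usage sketch of the atom this canonicalization serves (illustrative weights, public cvxpy API only): geo_mean(x, p=[1, 2, 1]) normalizes the weights to p / sum(p) = (1/4, 1/2, 1/4), which corresponds to the w used above.

import cvxpy as cp

x = cp.Variable(3, nonneg=True)
# geo_mean(x, [1, 2, 1]) is x[0]**(1/4) * x[1]**(1/2) * x[2]**(1/4).
prob = cp.Problem(cp.Maximize(cp.geo_mean(x, p=[1, 2, 1])),
                  [cp.sum(x) <= 1])
prob.solve()
print(x.value)  # most of the budget goes to x[1], the heaviest weight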
Example #3
    def graph_implementation(arg_objs, size, data=None):
        """Reduces the atom to an affine expression and list of constraints.

        Parameters
        ----------
        arg_objs : list
            LinExpr for each argument.
        size : tuple
            The size of the resulting expression.
        data :
            Additional data required by the atom.

        Returns
        -------
        tuple
            (LinOp for objective, list of constraints)
        """
        x = arg_objs[0]
        p, w = data

        if p == 1:
            return x, []
        else:
            one = lu.create_const(np.mat(np.ones(size)), size)
            if p == 0:
                return one, []
            else:
                t = lu.create_var(size)

                if 0 < p < 1:
                    return t, gm_constrs(t, [x, one], w)
                elif p > 1:
                    return t, gm_constrs(x, [t, one], w)
                elif p < 0:
                    return t, gm_constrs(one, [x, t], w)
                else:
                    raise NotImplementedError(
                        'this power is not yet supported.')
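
Each branch encodes a power constraint as a single weighted geometric-mean inequality. The plain-numpy check below (values chosen here purely for illustration) confirms the weights used for p > 1: on the boundary t = x**p, the inequality x <= t**(1/p) * 1**(1 - 1/p) holds with equality.

import numpy as np

p = 3.0
x_val = 1.7
t_val = x_val ** p             # a point on the boundary of {t >= x**p}
w = (1 / p, 1 - 1 / p)         # the weights used in the p > 1 branch
rhs = t_val ** w[0] * 1.0 ** w[1]
print(np.isclose(x_val, rhs))  # True: the geometric-mean form is tight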
Example #4
def power_canon(expr, args):
    x = args[0]
    p = expr.p
    w = expr.w

    if p == 1:
        return x, []

    shape = expr.shape
    ones = Constant(np.ones(shape))
    if p == 0:
        return ones, []
    else:
        t = Variable(shape)
        # TODO(akshayka): gm_constrs requires each of its inputs to be a Variable;
        # is this something that we want to change?
        if 0 < p < 1:
            return t, gm_constrs(t, [x, ones], w)
        elif p > 1:
            return t, gm_constrs(x, [t, ones], w)
        elif p < 0:
            return t, gm_constrs(ones, [x, t], w)
        else:
            raise NotImplementedError('This power is not yet supported.')
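
A short sketch at the public-API level (a toy problem of my own, assuming a standard cvxpy installation): power(x, p) is convex for p > 1, concave for 0 < p < 1, and convex for p < 0, matching the three gm_constrs branches above.

import cvxpy as cp

x = cp.Variable(nonneg=True)
prob = cp.Problem(cp.Minimize(cp.power(x, 1.5)), [x >= 2])
prob.solve()
print(prob.value)  # approximately 2 ** 1.5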
Example #5
    def graph_implementation(arg_objs, size, data=None):
        r"""Reduces the atom to an affine expression and list of constraints.

        Parameters
        ----------
        arg_objs : list
            LinExpr for each argument.
        size : tuple
            The size of the resulting expression.
        data :
            Additional data required by the atom.

        Returns
        -------
        tuple
            (LinOp for objective, list of constraints)

        Notes
        -----

        Implementation notes.

        - For general :math:`p \geq 1`, the inequality :math:`\|x\|_p \leq t`
          is equivalent to the following convex inequalities:

          .. math::

              |x_i| &\leq r_i^{1/p} t^{1 - 1/p}\\
              \sum_i r_i &= t.

          These inequalities happen to also be correct for :math:`p = +\infty`,
          if we interpret :math:`1/\infty` as :math:`0`.

        - For general :math:`0 < p < 1`, the inequality :math:`\|x\|_p \geq t`
          is equivalent to the following convex inequalities:

          .. math::

              r_i &\leq x_i^{p} t^{1 - p}\\
              \sum_i r_i &= t.

        - For general :math:`p < 0`, the inequality :math:`\|x\|_p \geq t`
          is equivalent to the following convex inequalities:

          .. math::

              t &\leq x_i^{-p/(1-p)} r_i^{1/(1 - p)}\\
              \sum_i r_i &= t.

        Although the inequalities above are correct, for a few special cases,
        we can represent the p-norm more efficiently and with fewer variables
        and inequalities.

        - For :math:`p = 1`, we use the representation

            .. math::

                x_i &\leq r_i\\
                -x_i &\leq r_i\\
                \sum_i r_i &= t

        - For :math:`p = \infty`, we use the representation

            .. math::

                x_i &\leq t\\
                -x_i &\leq t

          Note that we don't need the :math:`r` variable or the sum inequality.

        - For :math:`p = 2`, we use the natural second-order cone representation

            .. math::

                \|x\|_2 \leq t

          Note that we could have used the set of inequalities given above if we
          wanted an alternate decomposition of a large second-order cone into
          several smaller inequalities.

        """
        p = data[0]
        axis = data[1]
        x = arg_objs[0]
        t = lu.create_var((1, 1))
        constraints = []

        # first, take care of the special cases of p = 2, inf, and 1
        if p == 2:
            if axis is None:
                return t, [SOC(t, [x])]

            else:
                t = lu.create_var(size)
                return t, [
                    SOC_Axis(lu.reshape(t, (t.size[0] * t.size[1], 1)), x,
                             axis)
                ]

        if p == np.inf:
            t_ = lu.promote(t, x.size)
            return t, [
                lu.create_leq(x, t_),
                lu.create_geq(lu.sum_expr([x, t_]))
            ]

        # we need an absolute value constraint for the symmetric convex branches (p >= 1)
        # we alias |x| as x from this point forward to make the code pretty :)
        if p >= 1:
            absx = lu.create_var(x.size)
            constraints += [
                lu.create_leq(x, absx),
                lu.create_geq(lu.sum_expr([x, absx]))
            ]
            x = absx

        if p == 1:
            return lu.sum_entries(x), constraints

        # now, we take care of the remaining convex and concave branches
        # to create the rational powers, we need a new variable, r, and
        # the constraint sum(r) == t
        r = lu.create_var(x.size)
        t_ = lu.promote(t, x.size)
        constraints += [lu.create_eq(lu.sum_entries(r), t)]

        # make p a fraction so that the input weight to gm_constrs
        # is a nice tuple of fractions.
        p = Fraction(p)
        if p < 0:
            constraints += gm_constrs(t_, [x, r], (-p / (1 - p), 1 / (1 - p)))
        if 0 < p < 1:
            constraints += gm_constrs(r, [x, t_], (p, 1 - p))
        if p > 1:
            constraints += gm_constrs(x, [r, t_], (1 / p, 1 - 1 / p))

        return t, constraints
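
The general p >= 1 representation described in the Notes above can be checked numerically. In the snippet below (plain numpy; the particular choice r_i = |x_i|**p / t**(p - 1) is mine, not taken from the source), both the geometric-mean inequality and the sum constraint hold with equality when t = ||x||_p.

import numpy as np

p = 3.0
x = np.array([1.0, -2.0, 0.5])
t = np.linalg.norm(x, ord=p)        # t = ||x||_p
r = np.abs(x) ** p / t ** (p - 1)   # a feasible choice of r

print(np.allclose(np.abs(x), r ** (1 / p) * t ** (1 - 1 / p)))  # True
print(np.isclose(r.sum(), t))                                   # True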