Example 1
from sage.parallel.decorate import parallel

def call_in_parallel(f, list_of_tuples, ncpus=4):
    r"""
    Call the function ``f`` in parallel.

    INPUT:

    - ``f`` -- function to call.
    - ``list_of_tuples`` -- list of tuples to use as arguments to ``f``.
    - ``ncpus`` -- integer (default: 4); the number of cpus to use in parallel.

    OUTPUT: A list of tuples, each containing an ``(args, keywds)`` pair and a result.

    .. NOTE::

        See http://doc.sagemath.org/html/en/reference/parallel/sage/parallel/decorate.html

    EXAMPLES::

        sage: from boolean_cayley_graphs.classify_in_parallel import call_in_parallel
        sage: summ = lambda L: add(L)
        sage: call_in_parallel(summ,[((1,2),),((5,4),),((3,3),)],2)
        [((((1, 2),), {}), 3), ((((5, 4),), {}), 9), ((((3, 3),), {}), 6)]
    """
    parallelize = parallel(p_iter='fork', ncpus=ncpus)
    return list(parallelize(f)(list_of_tuples))
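
For context, the function above is a thin wrapper around Sage's
``sage.parallel.decorate.parallel`` decorator. A minimal standalone sketch
(assuming a Sage session; the function ``square`` is made up for
illustration) shows where the ``((args, keywds), result)`` output shape
comes from:

    from sage.parallel.decorate import parallel

    @parallel(p_iter='fork', ncpus=2)
    def square(n):
        return n * n

    # Calling the decorated function on a list of argument tuples yields
    # ((args, kwds), result) pairs, in nondeterministic order.
    for ((args, kwds), result) in square([(2,), (3,), (5,)]):
        print(args, result)  # e.g. (2,) 4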
Example 2
    def solutions(self, inhomogeneous_equations, log_range):
        """
        Parallel version of :meth:`solutions_serial`

        INPUT/OUTPUT:

        Same as :meth:`solutions_serial`, except that the output
        points are in random order. Order depends on the number of
        processors and relative speed of separate processes.

        EXAMPLES::

            sage: R.<s> = GF(7)[]
            sage: P2.<x,y,z> = toric_varieties.P2(base_ring=GF(7))
            sage: X = P2.subscheme(1)
            sage: point_set = X.point_set()
            sage: ffe = point_set._enumerator()
            sage: ffe.solutions([s^2-1, s^6-s^2], [range(6)])
            <generator object solutions at 0x...>
            sage: sorted(_)
            [(0,), (3,)]
        """
        # Do simple cases in one process (this includes most doctests)
        if len(log_range) <= 2:
            for log_t in self.solutions_serial(inhomogeneous_equations, log_range):
                yield log_t
            return  # generator finished (PEP 479: do not raise StopIteration)
        # Parallelize the outermost loop of the Cartesian product
        work = [([[r]] + log_range[1:],) for r in log_range[0]]
        from sage.parallel.decorate import Parallel
        parallel = Parallel()

        def partial_solution(work_range):
            return list(self.solutions_serial(inhomogeneous_equations, work_range))

        for partial_result in parallel(partial_solution)(work):
            for log_t in partial_result[-1]:
                yield log_t
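
A note on the work-splitting line above: ``[[r]] + log_range[1:]`` fixes the
first coordinate to ``r`` while keeping the remaining ranges, so the work
units partition the full Cartesian product. A minimal pure-Python sketch
(names are hypothetical):

    from itertools import product

    log_range = [range(3), range(2), range(2)]

    # One work unit per value r of the outermost range; the sub-product
    # [[r]] x log_range[1:] covers exactly the tuples starting with r.
    work = [[[r]] + list(log_range[1:]) for r in log_range[0]]

    chunks = [list(product(*ranges)) for ranges in work]
    all_tuples = [t for chunk in chunks for t in chunk]
    assert sorted(all_tuples) == sorted(product(*log_range))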
Example 3
def subsets_with_hereditary_property(f, X, max_obstruction_size=None, ncpus=1):
    r"""
    Return all subsets `S` of `X` such that `f(S)` is true.

    The boolean function `f` must be decreasing, i.e. `f(S)\Rightarrow f(S')` if
    `S'\subseteq S`.

    This function is implemented to call `f` as few times as possible. More
    precisely, `f` will be called on all sets `S` such that `f(S)` is true, as
    well as on all inclusionwise minimal sets `S` such that `f(S)` is false.

    The problem that this function answers is also known as the learning problem
    on monotone boolean functions, or as computing the set of winning coalitions
    in a simple game.

    INPUT:

    - ``f`` -- a boolean function which takes as input a list of elements from
      ``X``.

    - ``X`` -- a list/iterable.

    - ``max_obstruction_size`` (integer) -- if you know that there is
      a `k` such that `f(S)` is true if and only if `f(S')` is true
      for all `S'\subseteq S` with `|S'|\leq k`, set
      ``max_obstruction_size=k``. It may dramatically decrease the
      number of calls to `f`. Set to ``None`` by default, meaning
      `k=|X|`.

    - ``ncpus`` -- number of cpus to use for this computation. Note that
      changing the value from `1` (default) to anything different *enables*
      parallel computation, which has a cost of its own, so it is not
      necessarily a good move. In some cases, however, it is a *great* move. Set
      to ``None`` to automatically detect and use the maximum number of cpus
      available.

      .. NOTE::

          Parallel computations are performed through the
          :func:`~sage.parallel.decorate.parallel` decorator. See its
          documentation for more information, in particular with respect to the
          memory context.

    EXAMPLES:

    Sets whose elements all have the same remainder mod 2::

        sage: from sage.combinat.subsets_hereditary import subsets_with_hereditary_property
        sage: f = lambda x: (not x) or all(xx%2 == x[0]%2 for xx in x)
        sage: list(subsets_with_hereditary_property(f,range(4)))
        [[], [0], [1], [2], [3], [0, 2], [1, 3]]

    Same, using two processes::

        sage: sorted(list(subsets_with_hereditary_property(f,range(4),ncpus=2)))
        [[], [0], [0, 2], [1], [1, 3], [2], [3]]

    One can use this function to compute the independent sets of a graph. We
    know, however, that in this case the maximum obstructions are the edges, and
    have size 2. We can thus set ``max_obstruction_size=2``, which reduces the
    number of calls to `f` from 91 to 56::

        sage: num_calls=0
        sage: g = graphs.PetersenGraph()
        sage: def is_independent_set(S):
        ....:     global num_calls
        ....:     num_calls+=1
        ....:     return g.subgraph(S).size()==0
        sage: l1=list(subsets_with_hereditary_property(is_independent_set,g.vertices()))
        sage: num_calls
        91
        sage: num_calls=0
        sage: l2=list(subsets_with_hereditary_property(is_independent_set,g.vertices(),max_obstruction_size=2))
        sage: num_calls
        56
        sage: l1==l2
        True

    TESTS::

        sage: list(subsets_with_hereditary_property(lambda x:False,range(4)))
        []
        sage: list(subsets_with_hereditary_property(lambda x:len(x)<1,range(4)))
        [[]]
        sage: list(subsets_with_hereditary_property(lambda x:True,range(2)))
        [[], [0], [1], [0, 1]]
    """
    from sage.data_structures.bitset import Bitset
    from sage.parallel.decorate import parallel
    # About the implementation:
    #
    # 1) We work on X={0,...,n-1} but remember X to return correctly
    #    labelled answers.
    #
    # 2) We maintain a list of sets S such that f(S)=0 (i.e. 'no-sets'), in
    #    order to filter out larger sets for which f is necessarily False.
    #
    # 3) Those sets are stored in an array: bs[i] represents the set of all
    #    no-sets S we found such that i is NOT in S. Why ? Because it makes it
    #    easy to filter out sets: if a set S' whose *complement* is
    #    {i1,i2,...,ik} is such that bs[i1]&bs[i2]&...&bs[ik] is nonempty then
    #    f(S') is necessarily False.
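    #
    # 4) Illustrative example: with n=3, suppose S={0} is a recorded no-set at
    #    bit position p. Its complement is {1,2}, so bit p is set in bs[1] and
    #    bs[2]. For a candidate S'={0,1}, whose complement is {2}, bs[2]
    #    contains p: the known no-set S={0} is a subset of S', and since f is
    #    decreasing, f(S') must be False, with no call to f needed.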
    X_labels = list(X)
    n = len(X_labels)
    X = set(range(n))
    if max_obstruction_size is None:
        max_obstruction_size = n

    bs = [Bitset([], 1) for _ in range(n)]  # collection of no-sets
    nforb = 1  # number of no-sets stored
    current_layer = [[]]  # all yes-sets of size 'current_size'
    current_size = 0

    def explore_neighbors(s):
        r"""
        Explores the successors of a set s.

        The successors of a set s are all the sets s+[i] where max(s)<i. This
        function returns them all as a partition `(yes_sets,no_sets)`.
        """
        new_yes_sets = []
        new_no_sets = []
        for i in range((s[-1] + 1 if s else 0), n):  # all ways to extend it
            s_plus_i = s + [i]  # the extended set
            s_plus_i_c = Bitset(s_plus_i, n).complement()  # ... and its complement

            # Filter a no-set using the data collected so far.
            inter = Bitset([], nforb).complement()
            for j in s_plus_i_c:
                inter.intersection_update(bs[j])

            # If we cannot decide yet we must call f(S)
            if not inter:
                if set_size >= max_obstruction_size or f([X_labels[xx] for xx in s_plus_i]):
                    new_yes_sets.append(s_plus_i)
                else:
                    new_no_sets.append(s_plus_i)
        return (new_yes_sets, new_no_sets)

    # The empty set
    if f([]):
        yield []
    else:
        return

    if ncpus != 1:
        explore_neighbors_paral = parallel(ncpus=ncpus)(explore_neighbors)

    # All sets of size 0, then size 1, then ...
    set_size = -1
    while current_layer:
        set_size += 1
        new_no_sets = []
        new_yes_sets = []

        if ncpus == 1:
            yes_no_iter = (explore_neighbors(s) for s in current_layer)
        else:
            yes_no_iter = ((yes, no) for (_, (yes, no)) in explore_neighbors_paral(current_layer))

        for yes, no in yes_no_iter:
            new_yes_sets.extend(yes)
            new_no_sets.extend(no)
            for s in yes:
                yield [X_labels[xx] for xx in s]

        current_layer = new_yes_sets

        # Update bs with the new no-sets
        new_nforb = nforb + len(new_no_sets)
        for b in bs:  # resize the bitsets
            b.add(new_nforb)
            b.discard(new_nforb)
        for i, s in enumerate(new_no_sets):  # Fill the new entries
            for j in X.difference(s):
                bs[j].add(i + nforb)
        nforb = new_nforb
        current_size += 1

    # Did we forget to return X itself?
    #
    # If we did, this was probably the worst possible choice of algorithm,
    # for we computed f(S) on all 2^n subsets S, but well...
    if current_size == len(X) and nforb == 1 and f(X_labels):
        yield X_labels
Example 4
def multicrunch(surfsums, varname=None):
    """
    Given an iterable consisting of SURFSums, compute the rational function
    given by their combined sum.
    Note that this rational function necessarily has degree <= 0.
    """

    surfsums = list(surfsums)

    #
    # Combine the various critical sets and construct a candidate denominator.
    #

    critical = set().union(*(Q._critical for Q in surfsums))
    cand = dict()
    for Q in surfsums:
        E = Q._cand
        for r in E:
            if r not in cand or cand[r] < E[r]:
                cand[r] = E[r]

    if varname is None:
        varname = 's'

    R = QQ[varname]
    s = R.gen(0)
    g = R(prod((a * s - b)**e for ((a, b), e) in cand.items()))
    m = g.degree()

    logger.info('Total number of SURFs: %d' % sum(Q._count for Q in surfsums))

    for Q in surfsums:
        Q._file.flush()

    logger.info('Combined size of data files: %s' %
                readable_filesize(sum(os.path.getsize(Q._filename) for Q in surfsums)))
    logger.info('Number of critical points: %d' % len(critical))
    logger.info('Degree of candidate denominator: %d' % m)

    #
    # Construct m + 1 non-critical points for evaluation.
    #

    values = set()
    while len(values) < m + 1:
        x = QQ.random_element()
        if x in critical:
            continue
        values.add(x)
    values = list(values)

    #
    # Set up parallel computations.
    #

    # bucket_size = ceil(float(len(values)) / common.ncpus)
    # this was unused

    dat_filenames = [Q._filename for Q in surfsums]

    res_names = []
    val_names = []

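    # Round-robin striding: batch j receives values[j], values[j + ncpus], ...,
    # so the batches partition ``values`` and stay aligned with val_names/res_names.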
    value_batches = [values[j::common.ncpus] for j in range(common.ncpus)]

    with TemporaryDirectory() as tmpdir:
        for j, v in enumerate(value_batches):
            if not v:
                break

            val_filename = os.path.join(tmpdir, 'values%d' % j)
            val_names.append(val_filename)
            res_names.append(os.path.join(tmpdir, 'results%d' % j))
            with open(val_filename, 'w') as val_file:
                val_file.write(str(len(v)) + '\n')
                for x in v:
                    val_file.write(str(x) + '\n')

        def fun(k):
            ret = crunch(['crunch', val_names[k], res_names[k]] + dat_filenames)
            if ret == 0:
                logger.info('Cruncher #%d finished.' % k)
            return ret

        logger.info('Launching %d crunchers.' % len(res_names))

        if not common.debug:
            fun = parallel(ncpus=len(res_names))(fun)
            for (arg, ret) in fun(list(range(len(res_names)))):
                if ret == 'NO DATA':
                    raise RuntimeError('A parallel process died')
                if ret != 0:
                    raise RuntimeError('crunch failed')
        else:
            for k in range(len(res_names)):
                fun(k)

        #
        # Collect results
        #
        pairs = []

        for j, rn in enumerate(res_names):
            it_batch = iter(value_batches[j])
            with open(rn, 'r') as res_file:
                for line in res_file:
                    # We also need to evaluate the candidate denominator 'g'
                    # from above at the given random points.
                    x = QQ(next(it_batch))
                    pairs.append((x, g(x) * QQ(line)))

    if len(values) != len(pairs):
        raise RuntimeError('Length of results is off')

    f = R.lagrange_polynomial(list(pairs))
    res = SR(f / g)
    return res.factor() if res else res
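
The reconstruction step at the end of ``multicrunch`` (sample at ``m + 1``
non-critical points, multiply by the candidate denominator ``g``, then
interpolate the numerator) can be seen in isolation. A minimal, toy Sage
sketch, independent of the SURFSum machinery:

    # Rational-function reconstruction by Lagrange interpolation (assuming a
    # Sage session). If the denominator g is known, m + 1 samples of g(x)*F(x)
    # determine the numerator f, and F = f/g.
    R.<s> = QQ[]
    g = (s - 1) * (2*s - 3)        # candidate denominator, degree m = 2
    F = (s + 5) / g                # the "unknown" rational function
    points = [(x, g(x) * F(x)) for x in [0, 2, 7]]   # m + 1 = 3 samples
    f = R.lagrange_polynomial(points)
    assert f / g == F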
Example 5
    def apply_Up(self, c, group=None, scale=1, parallelize=False, times=0,
                 progress_bar=False, method='naive', repslocal=None,
                 Up_reps=None, steps=1):
        r"""
        Apply the Up Hecke operator to ``c``.
        """
        assert steps >= 1

        V = self.coefficient_module()
        R = V.base_ring()
        gammas = self.group().gens()

        if Up_reps is None:
            Up_reps = self.S_arithgroup().get_Up_reps()

        if repslocal is None:
            try:
                prec = V.base_ring().precision_cap()
            except AttributeError:
                prec = None
            repslocal = self.get_Up_reps_local(prec)
        i = 0
        if method == 'naive':
            assert times == 0
            G = self.S_arithgroup()
            Gn = G.large_group()
            if self.use_shapiro():
                if self.coefficient_module().trivial_action():
                    def calculate_Up_contribution(lst, c, i, j):
                        return sum([c.evaluate_and_identity(tt) for sk, tt in lst])
                else:
                    def calculate_Up_contribution(lst, c, i, j):
                        return sum([sk * c.evaluate_and_identity(tt) for sk, tt in lst])

                input_vec = []
                for j, gamma in enumerate(gammas):
                    for i, xi in enumerate(G.coset_reps()):
                        delta = Gn(G.get_coset_ti(set_immutable(xi * gamma.quaternion_rep))[0])
                        input_vec.append(([(sk, Gn.get_hecke_ti(g, delta)) for sk, g in zip(repslocal, Up_reps)], c, i, j))
                vals = [[V.coefficient_module()(0, normalize=False) for xi in G.coset_reps()] for gamma in gammas]
                if parallelize:
                    for inp, outp in parallel(calculate_Up_contribution)(input_vec):
                        vals[inp[0][-1]][inp[0][-2]] += outp
                else:
                    for inp in input_vec:
                        outp = calculate_Up_contribution(*inp)
                        vals[inp[-1]][inp[-2]] += outp
                ans = self([V(o) for o in vals])
            else:
                Gpn = G.small_group()
                if self.trivial_action():
                    def calculate_Up_contribution(lst, c, num_gamma):
                        return sum([c.evaluate(tt) for sk, tt in lst], V(0, normalize=False))
                else:
                    def calculate_Up_contribution(lst, c, num_gamma, pb_fraction=None):
                        ans = V(0, normalize=False)
                        for i, (sk, tt) in enumerate(lst):
                            ans += sk * c.evaluate(tt)
                            if pb_fraction is not None:
                                update_progress(i * pb_fraction, 'Up action')
                        return ans
                input_vec = []
                for j, gamma in enumerate(gammas):
                    input_vec.append(([(sk, Gpn.get_hecke_ti(g, gamma)) for sk, g in zip(repslocal, Up_reps)], c, j))
                vals = [V(0, normalize=False) for gamma in gammas]
                if parallelize:
                    for inp, outp in parallel(calculate_Up_contribution)(input_vec):
                        vals[inp[0][-1]] += outp
                else:
                    for counter, inp in enumerate(input_vec):
                        outp = calculate_Up_contribution(*inp, pb_fraction=float(1) / float(len(repslocal) * len(input_vec)))
                        vals[inp[-1]] += outp
                ans = self(vals)
            if scale != 1:
                ans = scale * ans
        else:
            assert method == 'bigmatrix'
            verbose('Getting Up matrices...')
            try:
                N = len(V(0)._moments.list())
            except AttributeError:
                N = 1
            nreps = len(Up_reps)
            ngens = len(self.group().gens())
            NN = ngens * N
            A = Matrix(ZZ, NN, NN, 0)
            total_counter = ngens**2
            counter = 0
            iS = 0
            for i, gi in enumerate(self.group().gens()):
                ti = [tuple(self.group().get_hecke_ti(sk, gi).word_rep) for sk in Up_reps]
                jS = 0
                for ans in find_newans(self, repslocal, ti):
                    A.set_block(iS, jS, ans)
                    jS += N
                    if progress_bar:
                        counter += 1
                        update_progress(float(counter) / float(total_counter), 'Up matrix')
                iS += N
            verbose('Computing 2^(%s)-th power of a %s x %s matrix' % (times, A.nrows(), A.ncols()))
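            # Repeated squaring: after the loop below, A holds the 2^times-th
            # power of the Up matrix, with entries reduced modulo self._pN
            # whenever N != 0.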
            for i in range(times):
                A = A**2
                if N != 0:
                    A = A.apply_map(lambda x: x % self._pN)
                update_progress(float(i + 1) / float(times), 'Exponentiating matrix')
            verbose('Done computing 2^(%s)-th power' % times)
            if times > 0:
                scale_factor = ZZ(scale).powermod(2**times, self._pN)
            else:
                scale_factor = ZZ(scale)
            bvec = Matrix(R, NN, 1, [o for b in c._val for o in b._moments.list()])
            if scale_factor != 1:
                bvec = scale_factor * bvec
            valmat = A * bvec
            appr_module = V.approx_module(N)
            ans = self([V(appr_module(valmat.submatrix(row=i, nrows=N).list()))
                        for i in range(0, valmat.nrows(), N)])
        if steps <= 1:
            return ans
        else:
            return self.apply_Up(ans, group=group, scale=scale, parallelize=parallelize,
                                 times=times, progress_bar=progress_bar, method=method,
                                 repslocal=repslocal, steps=steps - 1)