def _parse_poly(f):
    from sage.all import SR, QQ, HyperplaneArrangements, Matrix

    if isinstance(f, str):
        f = SR(f)

    if f.base_ring() == SR:
        L = f.factor_list()
        K = QQ
    else:
        L = list(f.factor())
        K = f.base_ring()

    # Remove constant factors.
    L = [T for T in L if T[0] not in K]
    F, M = zip(*L)

    # Verify that each polynomial factor is linear.
    is_lin = lambda g: all(g.degree(x) <= 1 for x in g.variables())
    if not all(is_lin(g) for g in F):
        raise ValueError("Expected product of linear factors.")

    varbs = f.variables()
    varbs_str = tuple(str(x) for x in varbs)

    HH = HyperplaneArrangements(K, varbs_str)

    def poly_vec(g):
        # Coefficient vector of g: constant term, then linear coefficients.
        c = K(g.subs({x: 0 for x in g.variables()}))
        return tuple([c] + [K(g.coefficient(x)) for x in varbs])

    F_vec = tuple(map(poly_vec, F))
    A = HH(Matrix(K, F_vec))

    # Constructing the arrangement scrambles the hyperplanes, so we need to
    # apply the same permutation to the multiplicities M.
    A_vec = tuple(tuple(H.coefficients()) for H in A.hyperplanes())
    perm = tuple(F_vec.index(v) for v in A_vec)
    M_new = tuple(M[i] for i in perm)
    return A, M_new
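
# A usage sketch for _parse_poly (illustrative only, not part of the module's
# public interface): given a product of linear factors, it returns the
# corresponding hyperplane arrangement together with the multiplicities of
# the factors, permuted to match the order of A.hyperplanes(). In a Sage
# session, with hypothetical variables x, y:
#
#   sage: x, y = SR.var('x y')
#   sage: A, M = _parse_poly(x * (x + y)**2)
#   sage: A.n_hyperplanes(), sorted(M)
#   (2, [1, 2])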
def multicrunch(surfsums, varname=None):
    """
    Given an iterable of SURFSums, compute the rational function given by
    their combined sum. Note that this rational function necessarily has
    degree <= 0.
    """
    surfsums = list(surfsums)

    #
    # Combine the various critical sets and construct a candidate denominator.
    #
    critical = set().union(*(Q._critical for Q in surfsums))

    cand = dict()
    for Q in surfsums:
        E = Q._cand
        for r in E:
            if r not in cand or cand[r] < E[r]:
                cand[r] = E[r]

    if varname is None:
        varname = 's'
    R = QQ[varname]
    s = R.gen(0)

    g = R(prod((a * s - b)**e for ((a, b), e) in cand.items()))
    m = g.degree()

    logger.info('Total number of SURFs: %d' % sum(Q._count for Q in surfsums))

    for Q in surfsums:
        Q._file.flush()

    logger.info('Combined size of data files: %s' % readable_filesize(
        sum(os.path.getsize(Q._filename) for Q in surfsums)))
    logger.info('Number of critical points: %d' % len(critical))
    logger.info('Degree of candidate denominator: %d' % m)

    #
    # Construct m + 1 non-critical points for evaluation.
    #
    values = set()
    while len(values) < m + 1:
        x = QQ.random_element()
        if x in critical:
            continue
        values.add(x)
    values = list(values)

    #
    # Set up parallel computations.
    #
    dat_filenames = [Q._filename for Q in surfsums]
    res_names = []
    val_names = []

    value_batches = [values[j::common.ncpus] for j in range(common.ncpus)]

    with TemporaryDirectory() as tmpdir:
        for j, v in enumerate(value_batches):
            if not v:
                break
            val_filename = os.path.join(tmpdir, 'values%d' % j)
            val_names.append(val_filename)
            res_names.append(os.path.join(tmpdir, 'results%d' % j))
            with open(val_filename, 'w') as val_file:
                val_file.write(str(len(v)) + '\n')
                for x in v:
                    val_file.write(str(x) + '\n')

        def fun(k):
            ret = crunch(['crunch', val_names[k], res_names[k]] + dat_filenames)
            if ret == 0:
                logger.info('Cruncher #%d finished.' % k)
            return ret

        logger.info('Launching %d crunchers.' % len(res_names))

        if not common.debug:
            fun = parallel(ncpus=len(res_names))(fun)
            for (arg, ret) in fun(list(range(len(res_names)))):
                if ret == 'NO DATA':
                    raise RuntimeError('A parallel process died')
                if ret != 0:
                    raise RuntimeError('crunch failed')
        else:
            for k in range(len(res_names)):
                fun(k)

        #
        # Collect results.
        #
        pairs = []
        for j, rn in enumerate(res_names):
            it_batch = iter(value_batches[j])
            with open(rn, 'r') as res_file:
                for line in res_file:
                    # The crunchers report values of the full sum; we also
                    # need to evaluate the candidate denominator 'g' from
                    # above at the given random points to recover the
                    # numerator.
                    x = QQ(next(it_batch))
                    pairs.append((x, g(x) * QQ(line)))

    if len(values) != len(pairs):
        raise RuntimeError('Length of results is off')

    f = R.lagrange_polynomial(pairs)
    res = SR(f / g)
    return res.factor() if res else res
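
# The reconstruction step in multicrunch can be illustrated in isolation:
# a rational function f/g of degree <= 0 with known denominator g is
# determined by its values at deg(g) + 1 points avoiding the zeros of g,
# via Lagrange interpolation of the numerator. A minimal sketch, assuming
# a Sage session (the black-box evaluator h below is illustrative only):
#
#   sage: R = QQ['s']; s = R.gen(0)
#   sage: g = (s - 1) * (2*s - 3)              # "known" denominator
#   sage: h = lambda x: (x**2 + 1) / g(x)      # black-box rational function
#   sage: pts = [x for x in map(QQ, range(10)) if g(x) != 0][:g.degree() + 1]
#   sage: pairs = [(x, g(x) * h(x)) for x in pts]
#   sage: R.lagrange_polynomial(pairs)         # recovers the numerator
#   s^2 + 1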