Code example #1
def higher_genus_w_automorphisms_search(info, query):
    if info.get('signature'):
        #allow for ; in signature
        info['signature'] = info['signature'].replace(';',',')
        parse_bracketed_posints(info,query,'signature',split=False,name='Signature',keepbrackets=True)
        if query.get('signature'):
            query['signature'] = info['signature'] = str(sort_sign(ast.literal_eval(query['signature']))).replace(' ','')
    parse_gap_id(info,query,'group',name='Group',qfield='group')
    parse_ints(info,query,'genus',name='Genus')
    parse_ints(info,query,'dim',name='Dimension of the family')
    if 'inc_hyper' in info:
        if info['inc_hyper'] == 'exclude':
            query['hyperelliptic'] = False
        elif info['inc_hyper'] == 'only':
            query['hyperelliptic'] = True
    if 'inc_cyc_trig' in info:
        if info['inc_cyc_trig'] == 'exclude':
            query['cyclic_trigonal'] = False
        elif info['inc_cyc_trig'] == 'only':
            query['cyclic_trigonal'] = True
    if 'inc_full' in info:
        if info['inc_full'] == 'exclude':
            query['full_auto'] = {'$exists': True}
        elif info['inc_full'] == 'only':
            query['full_auto'] = {'$exists': False}
    query['cc.1'] = 1
    if info.get('groupsize'):
        err, result = add_group_order_range(query, info['groupsize'])
        if err is not None:
            flash_error('Parse error on group order field. <font face="Courier New"><br />Given: ' + err + '<br />-------' + result + '</font>')

    info['group_display'] = sg_pretty
    info['sign_display'] = sign_display
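Note: the signature branch above relies on LMFDB helpers (parse_bracketed_posints, sort_sign) defined elsewhere. The following is a minimal standalone sketch of just the normalization step, with sort_sign_stub as a hypothetical stand-in for sort_sign (assumed here to keep the leading entry and sort the rest; the real helper may differ):

import ast

def sort_sign_stub(sig):
    # Hypothetical stand-in for LMFDB's sort_sign: keep the leading entry
    # and sort the remaining periods.
    return [sig[0]] + sorted(sig[1:])

def normalize_signature(raw):
    # Allow ';' as a separator, evaluate the bracketed list, canonicalize,
    # and strip spaces -- the same steps as in the search function above.
    raw = raw.replace(';', ',')
    sig = list(ast.literal_eval(raw))
    return str(sort_sign_stub(sig)).replace(' ', '')

print(normalize_signature('[0;2,3,3,2]'))   # -> [0,2,2,3,3]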
Code example #2
File: elliptic_curve.py Project: koffie/lmfdb
def elliptic_curve_search(info, query):
    parse_rational(info,query,'jinv','j-invariant')
    parse_ints(info,query,'conductor')
    parse_ints(info,query,'torsion','torsion order')
    parse_ints(info,query,'rank')
    parse_ints(info,query,'sha','analytic order of &#1064;')
    parse_bracketed_posints(info,query,'torsion_structure',maxlength=2,check_divisibility='increasing')
    # speed up slow torsion_structure searches by also setting torsion
    #if 'torsion_structure' in query and not 'torsion' in query:
    #    query['torsion'] = reduce(mul,[int(n) for n in query['torsion_structure']],1)
    if 'include_cm' in info:
        if info['include_cm'] == 'exclude':
            query['cm'] = 0
        elif info['include_cm'] == 'only':
            query['cm'] = {'$ne' : 0}
    parse_element_of(info,query,field='isodeg',qfield='isogeny_degrees',split_interval=1000)
    #parse_ints(info,query,field='isodeg',qfield='isogeny_degrees')
    parse_primes(info, query, 'surj_primes', name='maximal primes',
                 qfield='nonmax_primes', mode='complement')
    if info.get('surj_quantifier') == 'exactly':
        mode = 'exact'
    else:
        mode = 'append'
    parse_primes(info, query, 'nonsurj_primes', name='non-maximal primes',
                 qfield='nonmax_primes',mode=mode, radical='nonmax_rad')
    if 'optimal' in info and info['optimal'] == 'on':
        # fails on 990h3
        query['number'] = 1

    info['curve_url'] = lambda dbc: url_for(".by_triple_label", conductor=dbc['conductor'], iso_label=split_lmfdb_label(dbc['lmfdb_iso'])[1], number=dbc['lmfdb_number'])
    info['iso_url'] = lambda dbc: url_for(".by_double_iso_label", conductor=dbc['conductor'], iso_label=split_lmfdb_label(dbc['lmfdb_iso'])[1])
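Note: the commented-out speedup above derives the torsion order as the product of the cyclic factors in the torsion structure. A standalone illustration of that reduce call (the original module is assumed to import reduce and mul at the top of the file):

from functools import reduce
from operator import mul

def torsion_order_from_structure(torsion_structure):
    # Product of the cyclic factors, e.g. ['2', '4'] (i.e. Z/2 x Z/4) -> 8.
    return reduce(mul, [int(n) for n in torsion_structure], 1)

print(torsion_order_from_structure(['2', '4']))   # 8
print(torsion_order_from_structure([]))           # 1 (trivial torsion)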
Code example #3
File: main.py Project: koffie/lmfdb
def genus2_curve_search(info, query):
    info["st_group_list"] = st_group_list
    info["st_group_dict"] = st_group_dict
    info["real_geom_end_alg_list"] = real_geom_end_alg_list
    info["real_geom_end_alg_to_ST0_dict"] = real_geom_end_alg_to_ST0_dict
    info["aut_grp_list"] = aut_grp_list
    info["aut_grp_dict"] = aut_grp_dict
    info["geom_aut_grp_list"] = geom_aut_grp_list
    info["geom_aut_grp_dict"] = geom_aut_grp_dict
    parse_ints(info,query,'abs_disc','absolute discriminant')
    parse_bool(info,query,'is_gl2_type','is of GL2-type')
    parse_bool(info,query,'has_square_sha','has square Sha')
    parse_bool(info,query,'locally_solvable','is locally solvable')
    parse_bool(info,query,'is_simple_geom','is geometrically simple')
    parse_ints(info,query,'cond','conductor')
    parse_ints(info,query,'num_rat_pts','rational points')
    parse_ints(info,query,'num_rat_wpts','rational Weierstrass points')
    parse_bracketed_posints(info, query, 'torsion', 'torsion structure', maxlength=4,check_divisibility="increasing")
    parse_ints(info,query,'torsion_order','torsion order')
    if 'torsion' in query and not 'torsion_order' in query:
        query['torsion_order'] = reduce(mul,[int(n) for n in query['torsion']],1)
    if 'torsion' in query:
        query['torsion_subgroup'] = str(query['torsion']).replace(" ","")
        query.pop('torsion') # search using string key, not array of ints
    parse_ints(info,query,'two_selmer_rank','2-Selmer rank')
    parse_ints(info,query,'analytic_rank','analytic rank')
    # G2 invariants and drop-list items don't require parsing -- they are all strings (supplied by us, not the user)
    if 'g20' in info and 'g21' in info and 'g22' in info:
        query['g2_inv'] = "['%s','%s','%s']"%(info['g20'], info['g21'], info['g22'])
    if 'class' in info:
        query['class'] = info['class']
    for fld in ('st_group', 'real_geom_end_alg', 'aut_grp_id', 'geom_aut_grp_id'):
        if info.get(fld): query[fld] = info[fld]
    info["curve_url"] = lambda label: url_for_curve_label(label)
    info["class_url"] = lambda label: url_for_isogeny_class_label(label)
Code example #4
def galois_group_search(info, query):
    def includes_composite(s):
        s = s.replace(' ','').replace('..','-')
        for interval in s.split(','):
            if '-' in interval[1:]:
                ix = interval.index('-',1)
                a,b = ZZ(interval[:ix]), ZZ(interval[ix+1:])
                if b == a:
                    if a != 1 and not a.is_prime():
                        return True
                if b > a and b > 3:
                    return True
            else:
                a = ZZ(interval)
                if a != 1 and not a.is_prime():
                    return True
    parse_ints(info,query,'n','degree')
    parse_ints(info,query,'t')
    parse_ints(info,query,'order')
    parse_bracketed_posints(info, query, qfield='gapidfull', split=False, exactlength=2, keepbrackets=True, name='GAP id', field='gapid')
    for param in ('cyc', 'solv', 'prim'):
        parse_bool(info, query, param, process=int, blank=['0','Any'])
    parse_restricted(info,query,'parity',allowed=[1,-1],process=int,blank=['0','Any'])
    if 'order' in query and 'n' not in query:
        query['__sort__'] = ['order', 'gapid', 'n', 't']

    degree_str = prep_ranges(info.get('n'))
    info['show_subs'] = degree_str is None or (LIST_RE.match(degree_str) and includes_composite(degree_str))
    info['group_display'] = group_display_pretty
    info['yesno'] = yesno
    info['wgg'] = WebGaloisGroup.from_data
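Note: includes_composite decides whether the requested degree range can contain a composite number, which feeds into info['show_subs'] above. A self-contained version of the same logic, using a plain trial-division test in place of Sage's ZZ(...).is_prime():

def is_prime(n):
    # Simple trial division; stands in for Sage's ZZ(n).is_prime().
    if n < 2:
        return False
    d = 2
    while d * d <= n:
        if n % d == 0:
            return False
        d += 1
    return True

def includes_composite(s):
    # True if the comma/interval list can contain a composite degree,
    # e.g. '2-5' (contains 4) or '9', but not '2,3,5'.
    s = s.replace(' ', '').replace('..', '-')
    for interval in s.split(','):
        if '-' in interval[1:]:
            ix = interval.index('-', 1)
            a, b = int(interval[:ix]), int(interval[ix + 1:])
            if a == b and a != 1 and not is_prime(a):
                return True
            if b > a and b > 3:
                return True
        else:
            a = int(interval)
            if a != 1 and not is_prime(a):
                return True
    return False

print(includes_composite('2-5'), includes_composite('2,3,5'))  # True False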
Code example #5
File: main.py Project: kedlaya/lmfdb
def higher_genus_w_automorphisms_search(info, query):
    if info.get('signature'):
        #allow for ; in signature
        info['signature'] = info['signature'].replace(';',',')
        parse_bracketed_posints(info,query,'signature',split=False,name='Signature',keepbrackets=True)
        if query.get('signature'):
            query['signature'] = info['signature'] = str(sort_sign(ast.literal_eval(query['signature']))).replace(' ','')
    parse_gap_id(info,query,'group',name='Group',qfield='group')
    parse_ints(info,query,'genus',name='Genus')
    parse_ints(info,query,'dim',name='Dimension of the family')
    if 'inc_hyper' in info:
        if info['inc_hyper'] == 'exclude':
            query['hyperelliptic'] = False
        elif info['inc_hyper'] == 'only':
            query['hyperelliptic'] = True
    if 'inc_cyc_trig' in info:
        if info['inc_cyc_trig'] == 'exclude':
            query['cyclic_trigonal'] = False
        elif info['inc_cyc_trig'] == 'only':
            query['cyclic_trigonal'] = True
    if 'inc_full' in info:
        if info['inc_full'] == 'exclude':
            query['full_auto'] = {'$exists': True}
        elif info['inc_full'] == 'only':
            query['full_auto'] = {'$exists': False}
    query['cc.1'] = 1
    if info.get('groupsize'):
        err, result = add_group_order_range(query, info['groupsize'])
        if err is not None:
            flash_error('Parse error on group order field. <font face="Courier New"><br />Given: ' + err + '<br />-------' + result + '</font>')

    info['group_display'] = sg_pretty
    info['sign_display'] = sign_display
Code example #6
def elliptic_curve_search(info, query):
    parse_nf_string(info,
                    query,
                    'field',
                    name="base number field",
                    qfield='field_label')
    if query.get('field_label') == '1.1.1.1':
        return redirect(url_for("ec.rational_elliptic_curves", **request.args),
                        301)

    parse_ints(info, query, 'conductor_norm')
    parse_noop(info, query, 'conductor_label')
    parse_ints(info,
               query,
               'torsion',
               name='Torsion order',
               qfield='torsion_order')
    parse_bracketed_posints(info, query, 'torsion_structure', maxlength=2)
    if 'torsion_structure' in query and not 'torsion_order' in query:
        query['torsion_order'] = reduce(
            mul, [int(n) for n in query['torsion_structure']], 1)
    parse_ints(info, query, field='isodeg', qfield='isogeny_degrees')

    if 'jinv' in info:
        if info.get('field', '').strip() == '2.2.5.1':
            info['jinv'] = info['jinv'].replace('phi', 'a')
        if info.get('field', '').strip() == '2.0.4.1':
            info['jinv'] = info['jinv'].replace('i', 'a')
    parse_nf_elt(info, query, 'jinv', name='j-invariant')
    if query.get('jinv'):
        query['jinv'] = ','.join(query['jinv'])

    if 'include_isogenous' in info and info['include_isogenous'] == 'off':
        info['number'] = 1
        query['number'] = 1

    if 'include_base_change' in info:
        if info['include_base_change'] == 'off':
            query['base_change'] = []
        if info['include_base_change'] == 'only':
            query['base_change'] = {'$ne': []}
    else:
        info['include_base_change'] = "on"

    if 'include_Q_curves' in info:
        if info['include_Q_curves'] == 'exclude':
            query['q_curve'] = False
        elif info['include_Q_curves'] == 'only':
            query['q_curve'] = True

    if 'include_cm' in info:
        if info['include_cm'] == 'exclude':
            query['cm'] = 0
        elif info['include_cm'] == 'only':
            query['cm'] = {'$ne': 0}

    info['field_pretty'] = field_pretty
    info['web_ainvs'] = web_ainvs
Code example #7
File: elliptic_curve.py Project: sanni85/lmfdb
def elliptic_curve_search(info, query):
    parse_rational(info, query, 'jinv', 'j-invariant')
    parse_ints(info, query, 'conductor')
    parse_ints(info, query, 'torsion', 'torsion order')
    parse_ints(info, query, 'rank')
    parse_ints(info, query, 'sha', 'analytic order of &#1064;')
    parse_bracketed_posints(info,
                            query,
                            'torsion_structure',
                            maxlength=2,
                            check_divisibility='increasing')
    # speed up slow torsion_structure searches by also setting torsion
    #if 'torsion_structure' in query and not 'torsion' in query:
    #    query['torsion'] = reduce(mul,[int(n) for n in query['torsion_structure']],1)
    if 'include_cm' in info:
        if info['include_cm'] == 'exclude':
            query['cm'] = 0
        elif info['include_cm'] == 'only':
            query['cm'] = {'$ne': 0}
    parse_element_of(info,
                     query,
                     field='isodeg',
                     qfield='isogeny_degrees',
                     split_interval=1000)
    #parse_ints(info,query,field='isodeg',qfield='isogeny_degrees')
    parse_primes(info,
                 query,
                 'surj_primes',
                 name='maximal primes',
                 qfield='nonmax_primes',
                 mode='complement')
    if info.get('surj_quantifier') == 'exactly':
        mode = 'exact'
    else:
        mode = 'append'
    parse_primes(info,
                 query,
                 'nonsurj_primes',
                 name='non-maximal primes',
                 qfield='nonmax_primes',
                 mode=mode,
                 radical='nonmax_rad')
    if 'optimal' in info and info['optimal'] == 'on':
        # fails on 990h3
        query['number'] = 1

    info['curve_url'] = lambda dbc: url_for(".by_triple_label",
                                            conductor=dbc['conductor'],
                                            iso_label=split_lmfdb_label(dbc[
                                                'lmfdb_iso'])[1],
                                            number=dbc['lmfdb_number'])
    info['iso_url'] = lambda dbc: url_for(".by_double_iso_label",
                                          conductor=dbc['conductor'],
                                          iso_label=split_lmfdb_label(dbc[
                                              'lmfdb_iso'])[1])
Code example #8
File: main.py Project: kedlaya/lmfdb
def elliptic_curve_search(info, query):
    parse_nf_string(info,query,'field',name="base number field",qfield='field_label')
    if query.get('field_label') == '1.1.1.1':
        return redirect(url_for("ec.rational_elliptic_curves", **request.args), 301)

    parse_ints(info,query,'conductor_norm')
    parse_noop(info,query,'conductor_label')
    parse_ints(info,query,'torsion',name='Torsion order',qfield='torsion_order')
    parse_bracketed_posints(info,query,'torsion_structure',maxlength=2)
    if 'torsion_structure' in query and not 'torsion_order' in query:
        query['torsion_order'] = reduce(mul,[int(n) for n in query['torsion_structure']],1)
    parse_ints(info,query,field='isodeg',qfield='isogeny_degrees')

    if 'jinv' in info:
        if info.get('field','').strip() == '2.2.5.1':
            info['jinv'] = info['jinv'].replace('phi','a')
        if info.get('field','').strip() == '2.0.4.1':
            info['jinv'] = info['jinv'].replace('i','a')
    parse_nf_elt(info,query,'jinv',name='j-invariant')
    if query.get('jinv'):
        query['jinv'] =','.join(query['jinv'])

    if 'include_isogenous' in info and info['include_isogenous'] == 'off':
        info['number'] = 1
        query['number'] = 1

    if 'include_base_change' in info:
        if info['include_base_change'] == 'off':
            query['base_change'] = []
        if info['include_base_change'] == 'only':
            query['base_change'] = {'$ne':[]}
    else:
        info['include_base_change'] = "on"

    if 'include_Q_curves' in info:
        if info['include_Q_curves'] == 'exclude':
            query['q_curve'] = False
        elif info['include_Q_curves'] == 'only':
            query['q_curve'] = True

    if 'include_cm' in info:
        if info['include_cm'] == 'exclude':
            query['cm'] = 0
        elif info['include_cm'] == 'only':
            query['cm'] = {'$ne' : 0}

    info['field_pretty'] = field_pretty
    info['web_ainvs'] = web_ainvs
Code example #9
def number_field_search(info, query):
    parse_ints(info, query, 'degree')
    parse_galgrp(info, query, qfield=('degree', 'galt'))
    parse_bracketed_posints(info,
                            query,
                            'signature',
                            qfield=('degree', 'r2'),
                            exactlength=2,
                            extractor=lambda L: (L[0] + 2 * L[1], L[1]))
    parse_signed_ints(info,
                      query,
                      'discriminant',
                      qfield=('disc_sign', 'disc_abs'))
    parse_ints(info, query, 'class_number')
    parse_bracketed_posints(info,
                            query,
                            'class_group',
                            check_divisibility='increasing',
                            process=int)
    parse_primes(info,
                 query,
                 'ur_primes',
                 name='Unramified primes',
                 qfield='ramps',
                 mode='complement')
    # modes are now contained (in), exactly, include
    if 'ram_quantifier' in info and str(info['ram_quantifier']) == 'include':
        mode = 'append'
    elif 'ram_quantifier' in info and str(
            info['ram_quantifier']) == 'contained':
        mode = 'subsets'
    else:
        mode = 'exact'
    parse_primes(info,
                 query,
                 'ram_primes',
                 name='Ramified primes',
                 qfield='ramps',
                 mode=mode,
                 radical='disc_rad')
    ## This seems not to be used
    #if 'lucky' in info:
    #    label = db.nf_fields.lucky(query, 0)
    #    if label:
    #        return redirect(url_for(".by_label", label=clean_input(label)))
    info['wnf'] = WebNumberField.from_data
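Note: for the signature search above, the extractor lambda converts a user-supplied signature [r1, r2] into the stored pair (degree, r2), using degree = r1 + 2*r2. In isolation:

# Maps a signature [r1, r2] to (degree, r2), since degree = r1 + 2*r2.
extractor = lambda L: (L[0] + 2 * L[1], L[1])

print(extractor([3, 1]))   # (5, 1): a quintic field with 3 real embeddings
print(extractor([0, 2]))   # (4, 2): a totally imaginary quartic field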
Code example #10
def genus2_curve_search(info, query):
    info["st_group_list"] = st_group_list
    info["st_group_dict"] = st_group_dict
    info["real_geom_end_alg_list"] = real_geom_end_alg_list
    info["real_geom_end_alg_to_ST0_dict"] = real_geom_end_alg_to_ST0_dict
    info["aut_grp_list"] = aut_grp_list
    info["aut_grp_dict"] = aut_grp_dict
    info["geom_aut_grp_list"] = geom_aut_grp_list
    info["geom_aut_grp_dict"] = geom_aut_grp_dict
    parse_ints(info, query, 'abs_disc', 'absolute discriminant')
    parse_bool(info, query, 'is_gl2_type', 'is of GL2-type')
    parse_bool(info, query, 'has_square_sha', 'has square Sha')
    parse_bool(info, query, 'locally_solvable', 'is locally solvable')
    parse_bool(info, query, 'is_simple_geom', 'is geometrically simple')
    parse_ints(info, query, 'cond', 'conductor')
    parse_ints(info, query, 'num_rat_pts', 'rational points')
    parse_ints(info, query, 'num_rat_wpts', 'rational Weierstrass points')
    parse_bracketed_posints(info,
                            query,
                            'torsion',
                            'torsion structure',
                            maxlength=4,
                            check_divisibility="increasing")
    parse_ints(info, query, 'torsion_order', 'torsion order')
    if 'torsion' in query and not 'torsion_order' in query:
        query['torsion_order'] = reduce(mul,
                                        [int(n) for n in query['torsion']], 1)
    if 'torsion' in query:
        query['torsion_subgroup'] = str(query['torsion']).replace(" ", "")
        query.pop('torsion')  # search using string key, not array of ints
    parse_ints(info, query, 'two_selmer_rank', '2-Selmer rank')
    parse_ints(info, query, 'analytic_rank', 'analytic rank')
    # G2 invariants and drop-list items don't require parsing -- they are all strings (supplied by us, not the user)
    if 'g20' in info and 'g21' in info and 'g22' in info:
        query['g2_inv'] = "['%s','%s','%s']" % (info['g20'], info['g21'],
                                                info['g22'])
    if 'class' in info:
        query['class'] = info['class']
    for fld in ('st_group', 'real_geom_end_alg', 'aut_grp_id',
                'geom_aut_grp_id'):
        if info.get(fld): query[fld] = info[fld]
    info["curve_url"] = lambda label: url_for_curve_label(label)
    info["class_url"] = lambda label: url_for_isogeny_class_label(label)
Code example #11
File: main.py Project: sanni85/lmfdb
def belyi_search(info, query):
    info['geometry_types_list'] = geometry_types_list
    info['geometry_types_dict'] = geometry_types_dict
    info["belyi_galmap_url"] = lambda label: url_for_belyi_galmap_label(label)
    if 'group' in query:
        info['group'] = query['group']
    parse_bracketed_posints(info, query, 'abc_list', 'a, b, c', maxlength=3)
    if query.get('abc_list'):
        if len(query['abc_list']) == 3:
            a, b, c = sorted(query['abc_list'])
            query['a_s'] = a
            query['b_s'] = b
            query['c_s'] = c
        elif len(query['abc_list']) == 2:
            a, b = sorted(query['abc_list'])
            sub_query = []
            sub_query.append( {'a_s': a, 'b_s': b} )
            sub_query.append( {'b_s': a, 'c_s': b} )
            query['$or'] = sub_query
        elif len(query['abc_list']) == 1:
            a = query['abc_list'][0]
            query['$or'] = [{'a_s': a}, {'b_s': a}, {'c_s': a}]
        query.pop('abc_list')

    # a naive hack
    if info.get('abc'):
        for elt in ['a_s', 'b_s', 'c_s']:
            info_hack = {}
            info_hack[elt] = info['abc']
            parse_ints(info_hack, query, elt)

    parse_ints(info, query, 'g','g')
    parse_ints(info, query, 'deg', 'deg')
    parse_ints(info, query, 'orbit_size', 'orbit_size')
    # invariants and drop-list items don't require parsing -- they are all strings (supplied by us, not the user)
    for fld in ['geomtype','group']:
        if info.get(fld):
            query[fld] = info[fld]
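Note: in the abc_list branch above, three values fill the sorted slots a_s <= b_s <= c_s directly, two values are matched against the (a_s, b_s) or (b_s, c_s) pairs, and a single value against any slot, hence the $or clauses. A standalone sketch of that branching on a plain dict (no database needed):

def abc_query(abc_list):
    # Reproduce the branching above on a plain dict.
    query = {}
    if len(abc_list) == 3:
        query['a_s'], query['b_s'], query['c_s'] = sorted(abc_list)
    elif len(abc_list) == 2:
        a, b = sorted(abc_list)
        query['$or'] = [{'a_s': a, 'b_s': b}, {'b_s': a, 'c_s': b}]
    elif len(abc_list) == 1:
        a = abc_list[0]
        query['$or'] = [{'a_s': a}, {'b_s': a}, {'c_s': a}]
    return query

print(abc_query([3, 2]))
# {'$or': [{'a_s': 2, 'b_s': 3}, {'b_s': 2, 'c_s': 3}]}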
Code example #12
File: number_field.py Project: koffie/lmfdb
def number_field_search(info, query):
    parse_ints(info,query,'degree')
    parse_galgrp(info,query, qfield=('degree', 'galt'))
    parse_bracketed_posints(info,query,'signature',qfield=('degree','r2'),exactlength=2,extractor=lambda L: (L[0]+2*L[1],L[1]))
    parse_signed_ints(info,query,'discriminant',qfield=('disc_sign','disc_abs'))
    parse_ints(info,query,'class_number')
    parse_bracketed_posints(info,query,'class_group',check_divisibility='increasing',process=int)
    parse_primes(info,query,'ur_primes',name='Unramified primes',
                 qfield='ramps',mode='complement')
    # modes are now contained (in), exactly, include
    if 'ram_quantifier' in info and str(info['ram_quantifier']) == 'include':
        mode='append'
    elif 'ram_quantifier' in info and str(info['ram_quantifier']) == 'contained':
        mode='subsets'
    else:
        mode='exact'
    parse_primes(info,query,'ram_primes',name='Ramified primes',
                 qfield='ramps',mode=mode,radical='disc_rad')
    ## This seems not to be used
    #if 'lucky' in info:
    #    label = db.nf_fields.lucky(query, 0)
    #    if label:
    #        return redirect(url_for(".by_label", label=clean_input(label)))
    info['wnf'] = WebNumberField.from_data
Code example #13
def hgm_search(info, query):
    family_search = False
    if info.get('Submit Family') or info.get('family'):
        family_search = True
        query['__title__'] = r'Hypergeometric Family over $\Q$ Search Result'
        query[
            '__err_title__'] = r'Hypergeometric Family over $\Q$ Search Input Error'
        query['__table__'] = db.hgm_families

    queryab = {}
    for param in [
            'A', 'B', 'A2', 'B2', 'A3', 'B3', 'A5', 'B5', 'A7', 'B7', 'Au2',
            'Bu2', 'Au3', 'Bu3', 'Au5', 'Bu5', 'Au7', 'Bu7'
    ]:
        parse_bracketed_posints(info,
                                queryab,
                                param,
                                split=False,
                                listprocess=lambda a: sorted(a, reverse=True))
    # Combine the parts of the query if there are A,B parts
    if queryab:
        queryabrev = {}
        for k in queryab.keys():
            queryabrev[k + 'rev'] = queryab[k]
        query['$or'] = [queryab, queryabrev]

    # generic, irreducible not in DB yet
    parse_ints(info, query, 'degree')
    parse_ints(info, query, 'weight')
    parse_bracketed_posints(info,
                            query,
                            'famhodge',
                            'family Hodge vector',
                            split=False)
    parse_restricted(info, query, 'sign', allowed=['+1', 1, -1], process=int)
    # Make a version to search reversed way
    if not family_search:
        parse_ints(info, query, 'conductor', 'Conductor', 'cond')
        parse_rational(info, query, 't')
        parse_bracketed_posints(info, query, 'hodge', 'Hodge vector')

    info['make_label'] = make_abt_label
    info['make_t_label'] = make_t_label
    info['ab_label'] = ab_label
    info['display_t'] = display_t
    info['family'] = family_search
    info['factorint'] = factorint
Code example #14
File: main.py Project: sanni85/lmfdb
def hgm_search(info, query):
    family_search = False
    if info.get('Submit Family') or info.get('family'):
        family_search = True
        query['__title__'] = r'Hypergeometric Family over $\Q$ Search Result'
        query['__err_title__'] = r'Hypergeometric Family over $\Q$ Search Input Error'
        query['__table__'] = db.hgm_families

    queryab = {}
    for param in ['A', 'B', 'A2', 'B2', 'A3', 'B3', 'A5', 'B5', 'A7', 'B7',
                  'Au2', 'Bu2', 'Au3', 'Bu3', 'Au5', 'Bu5', 'Au7', 'Bu7']:
        parse_bracketed_posints(info, queryab, param, split=False,
                                listprocess=lambda a: sorted(a, reverse=True))
    # Combine the parts of the query if there are A,B parts
    if queryab:
        queryabrev = {}
        for k in queryab.keys():
            queryabrev[k+'rev'] = queryab[k]
        query['$or'] = [queryab, queryabrev]

    # generic, irreducible not in DB yet
    parse_ints(info, query, 'degree')
    parse_ints(info, query, 'weight')
    parse_bracketed_posints(info, query, 'famhodge', 'family Hodge vector',split=False)
    parse_restricted(info, query, 'sign', allowed=['+1',1,-1], process=int)
    # Make a version to search reversed way
    if not family_search:
        parse_ints(info, query, 'conductor', 'Conductor' , 'cond')
        parse_rational(info, query, 't')
        parse_bracketed_posints(info, query, 'hodge', 'Hodge vector')

    info['make_label'] = make_abt_label
    info['make_t_label'] = make_t_label
    info['ab_label'] = ab_label
    info['display_t'] = display_t
    info['family'] = family_search
    info['factorint'] = factorint
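Note: the A/B block above searches both the stored orientation and its reverse by repeating every constraint under a key with an 'rev' suffix and joining the two with $or. In isolation (assuming the reversed lists really are stored under keys such as 'Arev'):

def combine_ab_query(queryab):
    # Duplicate every A/B constraint under its '<key>rev' twin and offer
    # both alternatives to the database, as in the search function above.
    if not queryab:
        return {}
    queryabrev = {k + 'rev': v for k, v in queryab.items()}
    return {'$or': [queryab, queryabrev]}

print(combine_ab_query({'A': [4, 2], 'B': [3, 1]}))
# {'$or': [{'A': [4, 2], 'B': [3, 1]}, {'Arev': [4, 2], 'Brev': [3, 1]}]}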
Code example #15
def higher_genus_w_automorphisms_search(**args):
    info = to_dict(args)
    bread = get_bread([("Search results", '')])
    C = base.getDBConnection()
    query = {}
    if 'jump_to' in info:
        labs = info['jump_to']
        if label_is_one_passport(labs):
            return render_passport({'passport_label': labs})
        elif label_is_one_family(labs):
            return render_family({'label': labs})
        else:
            flash_error(
                "The label %s is not a legitimate label for this data.", labs)
            return redirect(url_for(".index"))

    #allow for ; in signature
    if info.get('signature'):
        info['signature'] = info['signature'].replace(';', ',')

    try:
        parse_gap_id(info, query, 'group', 'Group')
        parse_ints(info, query, 'genus', name='Genus')
        parse_bracketed_posints(info,
                                query,
                                'signature',
                                split=False,
                                name='Signature',
                                keepbrackets=True)
        if query.get('signature'):
            query['signature'] = info['signature'] = str(
                sort_sign(ast.literal_eval(query['signature']))).replace(
                    ' ', '')
        parse_ints(info, query, 'dim', name='Dimension of the family')
        if 'inc_hyper' in info:
            if info['inc_hyper'] == 'exclude':
                query['hyperelliptic'] = False
            elif info['inc_hyper'] == 'only':
                query['hyperelliptic'] = True
        if 'inc_cyc_trig' in info:
            if info['inc_cyc_trig'] == 'exclude':
                query['cyclic_trigonal'] = False
            elif info['inc_cyc_trig'] == 'only':
                query['cyclic_trigonal'] = True
        if 'inc_full' in info:
            if info['inc_full'] == 'exclude':
                query['full_auto'] = {'$exists': True}
            elif info['inc_full'] == 'only':
                query['full_auto'] = {'$exists': False}

        query['cc.1'] = 1

    except ValueError:
        return search_input_error(info, bread)
    count = parse_count(info)
    start = parse_start(info)

    res = C.curve_automorphisms.passports.find(query).sort([
        ('genus', pymongo.ASCENDING), ('dim', pymongo.ASCENDING),
        ('cc'[0], pymongo.ASCENDING)
    ])
    nres = res.count()
    res = res.skip(start).limit(count)

    if (start >= nres):
        start -= (1 + (start - nres) / count) * count
    if (start < 0):
        start = 0

    L = []
    for field in res:
        field['signature'] = ast.literal_eval(field['signature'])
        L.append(field)

    info['fields'] = L
    info['number'] = nres
    info['group_display'] = sg_pretty

    info['sign_display'] = sign_display
    info['start'] = start
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (
                start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres

    return render_template(
        "hgcwa-search.html",
        info=info,
        title="Families of Higher Genus Curves with Automorphisms Search Result",
        bread=bread)
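Note: several of the full search functions in this list clamp the paging offset when it points past the last result. A standalone version of that adjustment; the original snippets rely on Python 2's integer '/', written here as '//':

def clamp_start(start, nres, count):
    # Pull 'start' back by whole pages until it points at existing results,
    # then floor it at zero, as in the search functions above.
    if start >= nres:
        start -= (1 + (start - nres) // count) * count
    if start < 0:
        start = 0
    return start

print(clamp_start(100, 37, 20))   # 20: last page boundary with results
print(clamp_start(0, 0, 20))      # 0: empty result set stays at the start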
Code example #16
def elliptic_curve_search(info):

    if info.get('download') == '1' and info.get('Submit') and info.get(
            'query'):
        return download_search(info)

    if not 'query' in info:
        info['query'] = {}

    bread = info.get('bread', [('Elliptic Curves', url_for(".index")),
                               ('Search Results', '.')])
    if 'jump' in info:
        label = info.get('label', '').replace(" ", "")
        # This label should be a full isogeny class label or a full
        # curve label (including the field_label component)
        try:
            nf, cond_label, iso_label, number = split_full_label(label.strip())
        except ValueError:
            info['err'] = ''
            return search_input_error(info, bread)

        return redirect(
            url_for(".show_ecnf",
                    nf=nf,
                    conductor_label=cond_label,
                    class_label=iso_label,
                    number=number), 301)

    query = {}

    if 'jinv' in info:
        if info.get('field', '').strip() == '2.2.5.1':
            info['jinv'] = info['jinv'].replace('phi', 'a')
        if info.get('field', '').strip() == '2.0.4.1':
            info['jinv'] = info['jinv'].replace('i', 'a')
    try:
        parse_ints(info, query, 'conductor_norm')
        parse_noop(info, query, 'conductor_label')
        parse_nf_string(info,
                        query,
                        'field',
                        name="base number field",
                        qfield='field_label')
        parse_nf_elt(info, query, 'jinv', name='j-invariant')
        parse_ints(info,
                   query,
                   'torsion',
                   name='Torsion order',
                   qfield='torsion_order')
        parse_bracketed_posints(info, query, 'torsion_structure', maxlength=2)
        if 'torsion_structure' in query and not 'torsion_order' in query:
            query['torsion_order'] = reduce(
                mul, [int(n) for n in query['torsion_structure']], 1)
        parse_ints(info, query, field='isodeg', qfield='isogeny_degrees')
    except (TypeError, ValueError):
        return search_input_error(info, bread)

    if query.get('jinv'):
        query['jinv'] = ','.join(query['jinv'])

    if query.get('field_label') == '1.1.1.1':
        return redirect(url_for("ec.rational_elliptic_curves", **request.args),
                        301)

    if 'include_isogenous' in info and info['include_isogenous'] == 'off':
        info['number'] = 1
        query['number'] = 1

    if 'include_base_change' in info and info['include_base_change'] == 'off':
        query['base_change'] = []
    else:
        info['include_base_change'] = "on"

    if 'include_Q_curves' in info:
        if info['include_Q_curves'] == 'exclude':
            query['q_curve'] = False
        elif info['include_Q_curves'] == 'only':
            query['q_curve'] = True

    if 'include_cm' in info:
        if info['include_cm'] == 'exclude':
            query['cm'] = 0
        elif info['include_cm'] == 'only':
            query['cm'] = {'$ne': 0}

    info['query'] = query
    count = parse_count(info, 50)
    start = parse_start(info)

    # make the query and trim results according to start/count:

    cursor = db_ecnf().find(query)
    nres = cursor.count()
    if (start >= nres):
        start -= (1 + (start - nres) / count) * count
    if (start < 0):
        start = 0

    res = cursor.sort([('field_label', ASC), ('conductor_norm', ASC),
                       ('conductor_label', ASC), ('iso_nlabel', ASC),
                       ('number', ASC)]).skip(start).limit(count)

    res = list(res)
    for e in res:
        e['numb'] = str(e['number'])
        e['field_knowl'] = nf_display_knowl(e['field_label'],
                                            getDBConnection(),
                                            field_pretty(e['field_label']))

    info['curves'] = res  # [ECNF(e) for e in res]
    info['number'] = nres
    info['start'] = start
    info['count'] = count
    info['more'] = int(start + count < nres)
    info['field_pretty'] = field_pretty
    info['web_ainvs'] = web_ainvs
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (
                start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    t = info.get('title', 'Elliptic Curve search results')
    return render_template("ecnf-search-results.html",
                           info=info,
                           credit=ecnf_credit,
                           bread=bread,
                           title=t)
Code example #17
File: elliptic_curve.py Project: MelinaCarmona/lmfdb
def elliptic_curve_search(info):

    if info.get('download') == '1' and info.get('Submit') and info.get('query'):
        return download_search(info)

    if 'SearchAgain' in info:
        return rational_elliptic_curves()

    query = {}
    bread = info.get('bread',[('Elliptic Curves', url_for("ecnf.index")), ('$\Q$', url_for(".rational_elliptic_curves")), ('Search Results', '.')])

    if 'jump' in info:
        label = info.get('label', '').replace(" ", "")
        m = match_lmfdb_label(label)
        if m:
            try:
                return by_ec_label(label)
            except ValueError:
                return elliptic_curve_jump_error(label, info, wellformed_label=True)
        elif label.startswith("Cremona:"):
            label = label[8:]
            m = match_cremona_label(label)
            if m:
                try:
                    return by_ec_label(label)
                except ValueError:
                    return elliptic_curve_jump_error(label, info, wellformed_label=True)
        elif match_cremona_label(label):
            return elliptic_curve_jump_error(label, info, cremona_label=True)
        elif label:
            # Try to parse a string like [1,0,3,2,4] as valid
            # Weierstrass coefficients:
            lab = re.sub(r'\s','',label)
            lab = re.sub(r'^\[','',lab)
            lab = re.sub(r']$','',lab)
            try:
                labvec = lab.split(',')
                labvec = [QQ(str(z)) for z in labvec] # Rationals allowed
                E = EllipticCurve(labvec)
                # Now we do have a valid curve over Q, but it might
                # not be in the database.
                ainvs = [str(c) for c in E.minimal_model().ainvs()]
                xainvs = ''.join(['[',','.join(ainvs),']'])
                data = db_ec().find_one({'xainvs': xainvs})
                if data is None:
                    data = db_ec().find_one({'ainvs': ainvs})
                    if data is None:
                        info['conductor'] = E.conductor()
                        return elliptic_curve_jump_error(label, info, missing_curve=True)
                return by_ec_label(data['lmfdb_label'])
            except (TypeError, ValueError, ArithmeticError):
                return elliptic_curve_jump_error(label, info)
        else:
            query['label'] = ''

    try:
        parse_rational(info,query,'jinv','j-invariant')
        parse_ints(info,query,'conductor')
        parse_ints(info,query,'torsion','torsion order')
        parse_ints(info,query,'rank')
        parse_ints(info,query,'sha','analytic order of &#1064;')
        parse_bracketed_posints(info,query,'torsion_structure',maxlength=2,process=str,check_divisibility='increasing')
        # speed up slow torsion_structure searches by also setting torsion
        if 'torsion_structure' in query and not 'torsion' in query:
            query['torsion'] = reduce(mul,[int(n) for n in query['torsion_structure']],1)
        if 'include_cm' in info:
            if info['include_cm'] == 'exclude':
                query['cm'] = 0
            elif info['include_cm'] == 'only':
                query['cm'] = {'$ne' : 0}

        parse_ints(info,query,field='isodeg',qfield='isogeny_degrees')

        parse_primes(info, query, 'surj_primes', name='surjective primes',
                     qfield='non-maximal_primes', mode='complement')
        if info.get('surj_quantifier') == 'exactly':
            mode = 'exact'
        else:
            mode = 'append'
        parse_primes(info, query, 'nonsurj_primes', name='non-surjective primes',
                     qfield='non-maximal_primes',mode=mode)
    except ValueError as err:
        info['err'] = str(err)
        return search_input_error(info, bread)

    count = parse_count(info,100)
    start = parse_start(info)

    if 'optimal' in info and info['optimal'] == 'on':
        # fails on 990h3
        query['number'] = 1

    info['query'] = query
    cursor = db_ec().find(query)
    nres = cursor.count()
    if(start >= nres):
        start -= (1 + (start - nres) / count) * count
    if(start < 0):
        start = 0
    res = cursor.sort([('conductor', ASCENDING), ('iso_nlabel', ASCENDING),
                       ('lmfdb_number', ASCENDING)]).skip(start).limit(count)
    info['curves'] = res
    info['format_ainvs'] = format_ainvs
    info['curve_url'] = lambda dbc: url_for(".by_triple_label", conductor=dbc['conductor'], iso_label=split_lmfdb_label(dbc['lmfdb_iso'])[1], number=dbc['lmfdb_number'])
    info['iso_url'] = lambda dbc: url_for(".by_double_iso_label", conductor=dbc['conductor'], iso_label=split_lmfdb_label(dbc['lmfdb_iso'])[1])
    info['number'] = nres
    info['start'] = start
    info['count'] = count
    info['more'] = int(start + count < nres)

    
    if nres == 1:
        info['report'] = 'unique match'
    elif nres == 2: 
        info['report'] = 'displaying both matches'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    #credit = 'John Cremona'
    #if 'non-surjective_primes' in query or 'non-maximal_primes' in query:
    #    credit += ' and Andrew Sutherland'

    t = info.get('title','Elliptic Curves search results')
    return render_template("ec-search-results.html", info=info, credit=ec_credit(), bread=bread, title=t)
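Note: the jump branch above accepts raw Weierstrass coefficients such as [1,0,3,2,4], strips the brackets, and reads the entries as rationals before building the curve and looking up its minimal model. A standalone sketch of just the string handling, with fractions.Fraction standing in for Sage's QQ:

import re
from fractions import Fraction   # stand-in for Sage's QQ

def parse_ainvs(label):
    # Strip whitespace and the surrounding brackets, then read the
    # coefficients as rationals, as in the jump handler above.
    lab = re.sub(r'\s', '', label)
    lab = re.sub(r'^\[', '', lab)
    lab = re.sub(r']$', '', lab)
    return [Fraction(z) for z in lab.split(',')]

print(parse_ainvs('[1, 0, 3, 2, 4]'))
# [Fraction(1, 1), Fraction(0, 1), Fraction(3, 1), Fraction(2, 1), Fraction(4, 1)]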
Code example #18
File: number_field.py Project: alinabucur/lmfdb
def number_field_search(**args):
    info = to_dict(args)

    info["learnmore"] = [
        ("Global number field labels", url_for(".render_labels_page")),
        ("Galois group labels", url_for(".render_groups_page")),
        (Completename, url_for(".render_discriminants_page")),
        ("Quadratic imaginary class groups", url_for(".render_class_group_data")),
    ]
    t = "Global Number Field search results"
    bread = [("Global Number Fields", url_for(".number_field_render_webpage")), ("Search results", " ")]

    # for k in info.keys():
    #  nf_logger.debug(str(k) + ' ---> ' + str(info[k]))
    # nf_logger.debug('******************* '+ str(info['search']))

    if "natural" in info:
        query = {"label_orig": info["natural"]}
        try:
            parse_nf_string(info, query, "natural", name="Label", qfield="label")
            return redirect(url_for(".by_label", label=clean_input(query["label"])))
        except ValueError:
            query["err"] = info["err"]
            return search_input_error(query, bread)

    query = {}
    try:
        parse_galgrp(info, query, qfield="galois")
        parse_ints(info, query, "degree")
        parse_bracketed_posints(info, query, "signature", split=False, exactlength=2)
        parse_signed_ints(info, query, "discriminant", qfield=("disc_sign", "disc_abs_key"), parse_one=make_disc_key)
        parse_ints(info, query, "class_number")
        parse_bracketed_posints(info, query, "class_group", split=False, check_divisibility="increasing")
        parse_primes(
            info, query, "ur_primes", name="Unramified primes", qfield="ramps", mode="complement", to_string=True
        )
        if "ram_quantifier" in info and str(info["ram_quantifier"]) == "some":
            mode = "append"
        else:
            mode = "exact"
        parse_primes(info, query, "ram_primes", "ramified primes", "ramps", mode, to_string=True)
    except ValueError:
        return search_input_error(info, bread)
    count = parse_count(info)
    start = parse_start(info)

    if info.get("paging"):
        try:
            paging = int(info["paging"])
            if paging == 0:
                start = 0
        except:
            pass

    C = base.getDBConnection()
    # nf_logger.debug(query)
    info["query"] = dict(query)
    if "lucky" in args:
        one = C.numberfields.fields.find_one(query)
        if one:
            label = one["label"]
            return redirect(url_for(".by_label", label=clean_input(label)))

    fields = C.numberfields.fields

    res = fields.find(query)

    if "download" in info and info["download"] != "0":
        return download_search(info, res)

    res = res.sort([("degree", ASC), ("disc_abs_key", ASC), ("disc_sign", ASC)])
    nres = res.count()
    res = res.skip(start).limit(count)

    if start >= nres:
        start -= (1 + (start - nres) / count) * count
    if start < 0:
        start = 0

    info["fields"] = res
    info["number"] = nres
    info["start"] = start
    if nres == 1:
        info["report"] = "unique match"
    else:
        if nres > count or start != 0:
            info["report"] = "displaying matches %s-%s of %s" % (start + 1, min(nres, start + count), nres)
        else:
            info["report"] = "displaying all %s matches" % nres

    info["wnf"] = WebNumberField.from_data
    return render_template("number_field_search.html", info=info, title=t, bread=bread)
Code example #19
File: number_field.py Project: MelinaCarmona/lmfdb
def number_field_search(info):

    info['learnmore'] = [
        ('Global number field labels', url_for(".render_labels_page")),
        ('Galois group labels', url_for(".render_groups_page")),
        (Completename, url_for(".render_discriminants_page")),
        ('Quadratic imaginary class groups',
         url_for(".render_class_group_data"))
    ]
    t = 'Global Number Field search results'
    bread = [('Global Number Fields', url_for(".number_field_render_webpage")),
             ('Search results', ' ')]

    if 'natural' in info:
        query = {'label_orig': info['natural']}
        try:
            parse_nf_string(info,
                            query,
                            'natural',
                            name="Label",
                            qfield='label')
            return redirect(
                url_for(".by_label", label=clean_input(query['label_orig'])))
        except ValueError:
            query['err'] = info['err']
            return search_input_error(query, bread)

    if 'algebra' in info:
        fields = info['algebra'].split('_')
        fields2 = [WebNumberField.from_coeffs(a) for a in fields]
        for j in range(len(fields)):
            if fields2[j] is None:
                fields2[j] = WebNumberField.fakenf(fields[j])
        t = 'Number field algebra'
        info = {'fields': fields2}
        return render_template("number_field_algebra.html",
                               info=info,
                               title=t,
                               bread=bread)

    query = {}
    try:
        parse_galgrp(info, query, qfield='galois')
        parse_ints(info, query, 'degree')
        parse_bracketed_posints(info,
                                query,
                                'signature',
                                split=False,
                                exactlength=2)
        parse_signed_ints(info,
                          query,
                          'discriminant',
                          qfield=('disc_sign', 'disc_abs_key'),
                          parse_one=make_disc_key)
        parse_ints(info, query, 'class_number')
        parse_bracketed_posints(info,
                                query,
                                'class_group',
                                split=False,
                                check_divisibility='increasing')
        parse_primes(info,
                     query,
                     'ur_primes',
                     name='Unramified primes',
                     qfield='ramps',
                     mode='complement',
                     to_string=True)
        # modes are now contained (in), exactly, include
        if 'ram_quantifier' in info and str(
                info['ram_quantifier']) == 'include':
            mode = 'append'
            parse_primes(info,
                         query,
                         'ram_primes',
                         'ramified primes',
                         'ramps',
                         mode,
                         to_string=True)
        elif 'ram_quantifier' in info and str(
                info['ram_quantifier']) == 'contained':
            parse_primes(info,
                         query,
                         'ram_primes',
                         'ramified primes',
                         'ramps_all',
                         'subsets',
                         to_string=False)
            pass  # build list
        else:
            mode = 'liststring'
            parse_primes(info, query, 'ram_primes', 'ramified primes',
                         'ramps_all', mode)
    except ValueError:
        return search_input_error(info, bread)
    count = parse_count(info)
    start = parse_start(info)

    # nf_logger.debug(query)
    info['query'] = dict(query)
    if 'lucky' in info:
        one = nfdb().find_one(query)
        if one:
            label = one['label']
            return redirect(url_for(".by_label", label=clean_input(label)))

    fields = nfdb()

    res = fields.find(query)
    res = res.sort([('degree', ASC), ('disc_abs_key', ASC),
                    ('disc_sign', ASC)])

    if 'download' in info and info['download'] != '0':
        return download_search(info, res)

    nres = res.count()
    res = res.skip(start).limit(count)

    if (start >= nres):
        start -= (1 + (start - nres) / count) * count
    if (start < 0):
        start = 0

    info['fields'] = res
    info['number'] = nres
    info['start'] = start
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (
                start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres

    info['wnf'] = WebNumberField.from_data
    return render_template("number_field_search.html",
                           info=info,
                           title=t,
                           bread=bread)
Code example #20
File: main.py Project: akoutsianas/lmfdb
def elliptic_curve_search(**args):
    info = to_dict(args['data'])
    
    if 'download' in info and info['download'] != 0:
        return download_search(info)

    if not 'query' in info:
        info['query'] = {}
    
    bread = [('Elliptic Curves', url_for(".index")),
             ('Search Results', '.')]
    if 'jump' in info:
        label = info.get('label', '').replace(" ", "")
        # This label should be a full isogeny class label or a full
        # curve label (including the field_label component)
        try:
            nf, cond_label, iso_label, number = split_full_label(label.strip())
        except ValueError:
            info['err'] = ''
            return search_input_error(info, bread)

        return show_ecnf(nf, cond_label, iso_label, number)

    query = {}

    try:
        parse_ints(info,query,'conductor_norm')
        parse_noop(info,query,'conductor_label')
        parse_nf_string(info,query,'field',name="base number field",qfield='field_label')
        parse_nf_elt(info,query,'jinv',name='j-invariant')
        parse_ints(info,query,'torsion',name='Torsion order',qfield='torsion_order')
        parse_bracketed_posints(info,query,'torsion_structure',maxlength=2)
        if 'torsion_structure' in query and not 'torsion_order' in query:
            query['torsion_order'] = reduce(mul,[int(n) for n in query['torsion_structure']],1)
    except ValueError:
        return search_input_error(info, bread)

    if 'include_isogenous' in info and info['include_isogenous'] == 'off':
        info['number'] = 1
        query['number'] = 1

    if 'include_base_change' in info and info['include_base_change'] == 'off':
        query['base_change'] = []
    else:
        info['include_base_change'] = "on"

    if 'include_Q_curves' in info:
        if info['include_Q_curves'] == 'exclude':
            query['q_curve'] = False
        elif info['include_Q_curves'] == 'only':
            query['q_curve'] = True

    if 'include_cm' in info:
        if info['include_cm'] == 'exclude':
            query['cm'] = 0
        elif info['include_cm'] == 'only':
            query['cm'] = {'$ne' : 0}

    info['query'] = query
    count = parse_count(info, 50)
    start = parse_start(info)

    # make the query and trim results according to start/count:

    cursor = db_ecnf().find(query)
    nres = cursor.count()
    if(start >= nres):
        start -= (1 + (start - nres) / count) * count
    if(start < 0):
        start = 0
    
    res = cursor.sort([('field_label', ASC), ('conductor_norm', ASC), ('conductor_label', ASC), ('iso_nlabel', ASC), ('number', ASC)]).skip(start).limit(count)

    res = list(res)
    for e in res:
        e['numb'] = str(e['number'])
        e['field_knowl'] = nf_display_knowl(e['field_label'], getDBConnection(), field_pretty(e['field_label']))

    info['curves'] = res  # [ECNF(e) for e in res]
    info['number'] = nres
    info['start'] = start
    info['count'] = count
    info['more'] = int(start + count < nres)
    info['field_pretty'] = field_pretty
    info['web_ainvs'] = web_ainvs
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    t = 'Elliptic Curve search results'
    return render_template("ecnf-search-results.html", info=info, credit=ecnf_credit, bread=bread, title=t)
Code example #21
File: elliptic_curve.py Project: haraldschilly/lmfdb
def elliptic_curve_search(info):

    if info.get('download') == '1' and info.get('Submit') and info.get('query'):
        return download_search(info)

    if 'SearchAgain' in info:
        return rational_elliptic_curves()

    query = {}
    bread = info.get('bread',[('Elliptic Curves', url_for("ecnf.index")), ('$\Q$', url_for(".rational_elliptic_curves")), ('Search Results', '.')])

    if 'jump' in info:
        label = info.get('label', '').replace(" ", "")
        m = match_lmfdb_label(label)
        if m:
            try:
                return by_ec_label(label)
            except ValueError:
                return elliptic_curve_jump_error(label, info, wellformed_label=True)
        elif label.startswith("Cremona:"):
            label = label[8:]
            m = match_cremona_label(label)
            if m:
                try:
                    return by_ec_label(label)
                except ValueError:
                    return elliptic_curve_jump_error(label, info, wellformed_label=True)
        elif match_cremona_label(label):
            return elliptic_curve_jump_error(label, info, cremona_label=True)
        elif label:
            # Try to parse a string like [1,0,3,2,4] as valid
            # Weierstrass coefficients:
            lab = re.sub(r'\s','',label)
            lab = re.sub(r'^\[','',lab)
            lab = re.sub(r']$','',lab)
            try:
                labvec = lab.split(',')
                labvec = [QQ(str(z)) for z in labvec] # Rationals allowed
                E = EllipticCurve(labvec).minimal_model()
                # Now we do have a valid curve over Q, but it might
                # not be in the database.
                data = db_ec().find_one({'xainvs': EC_ainvs(E)})
                if data is None:
                    info['conductor'] = E.conductor()
                    return elliptic_curve_jump_error(label, info, missing_curve=True)
                return by_ec_label(data['lmfdb_label'])
            except (TypeError, ValueError, ArithmeticError):
                return elliptic_curve_jump_error(label, info)
        else:
            query['label'] = ''

    try:
        parse_rational(info,query,'jinv','j-invariant')
        parse_ints(info,query,'conductor')
        parse_ints(info,query,'torsion','torsion order')
        parse_ints(info,query,'rank')
        parse_ints(info,query,'sha','analytic order of &#1064;')
        parse_bracketed_posints(info,query,'torsion_structure',maxlength=2,process=str,check_divisibility='increasing')
        # speed up slow torsion_structure searches by also setting torsion
        if 'torsion_structure' in query and not 'torsion' in query:
            query['torsion'] = reduce(mul,[int(n) for n in query['torsion_structure']],1)
        if 'include_cm' in info:
            if info['include_cm'] == 'exclude':
                query['cm'] = 0
            elif info['include_cm'] == 'only':
                query['cm'] = {'$ne' : 0}

        parse_ints(info,query,field='isodeg',qfield='isogeny_degrees')

        parse_primes(info, query, 'surj_primes', name='surjective primes',
                     qfield='non-maximal_primes', mode='complement')
        if info.get('surj_quantifier') == 'exactly':
            mode = 'exact'
        else:
            mode = 'append'
        parse_primes(info, query, 'nonsurj_primes', name='non-surjective primes',
                     qfield='non-maximal_primes',mode=mode)
    except ValueError as err:
        info['err'] = str(err)
        return search_input_error(info, bread)

    count = parse_count(info,100)
    start = parse_start(info)

    if 'optimal' in info and info['optimal'] == 'on':
        # fails on 990h3
        query['number'] = 1

    info['query'] = query
    cursor = db_ec().find(query)
    cursor = cursor.sort([('conductor', ASCENDING), ('iso_nlabel', ASCENDING),
                       ('lmfdb_number', ASCENDING)])
    # equivalent to
    # cursor = res
    # nres = res.count()
    # if(start >= nres):
    #     start -= (1 + (start - nres) / count) * count
    # if(start < 0):
    #    start = 0
    # res = res.skip(start).limit(count)
    try:
        start, nres, res = search_cursor_timeout_decorator(cursor, start, count)
    except ValueError as err:
        info['err'] = err
        return search_input_error(info, bread)

    info['curves'] = res
    info['curve_url'] = lambda dbc: url_for(".by_triple_label", conductor=dbc['conductor'], iso_label=split_lmfdb_label(dbc['lmfdb_iso'])[1], number=dbc['lmfdb_number'])
    info['iso_url'] = lambda dbc: url_for(".by_double_iso_label", conductor=dbc['conductor'], iso_label=split_lmfdb_label(dbc['lmfdb_iso'])[1])
    info['number'] = nres
    info['start'] = start
    info['count'] = count
    info['more'] = int(start + count < nres)

    if nres == 1:
        info['report'] = 'unique match'
    elif nres == 2: 
        info['report'] = 'displaying both matches'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres

    t = info.get('title','Elliptic Curves search results')
    return render_template("ec-search-results.html", info=info, credit=ec_credit(), bread=bread, title=t)
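
The commented-out block in the snippet above documents the offset clamping that search_cursor_timeout_decorator is expected to perform before paging the cursor. As a rough, database-free sketch of that clamp-and-slice logic (illustrative only, not the LMFDB helper):

def clamp_and_page(results, start, count):
    # Clamp an out-of-range start offset back into range, then return the
    # page results[start:start+count], mirroring the skip/limit logic above.
    nres = len(results)
    if start >= nres:
        start -= (1 + (start - nres) // count) * count
    if start < 0:
        start = 0
    return start, nres, results[start:start + count]

# e.g. 23 rows, page size 10, caller asked for start=40 -> last full page
assert clamp_and_page(list(range(23)), 40, 10) == (20, 23, [20, 21, 22])
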
Code example #22
0
File: main.py Project: AurelPage/lmfdb
def genus2_curve_search(info):
    if 'jump' in info:
        jump = info["jump"].strip()
        if re.match(r'^\d+\.[a-z]+\.\d+\.\d+$',jump):
            return redirect(url_for_curve_label(jump), 301)
        else:
            if re.match(r'^\d+\.[a-z]+$', jump):
                return redirect(url_for_isogeny_class_label(jump), 301)
            else:
                # Handle direct Lhash input
                if re.match(r'^\#\d+$',jump) and ZZ(jump[1:]) < 2**61:
                    c = g2c_db_curves().find_one({'Lhash': jump[1:].strip()})
                    if c:
                        return redirect(url_for_isogeny_class_label(c["class"]), 301)
                    else:
                        errmsg = "hash %s not found"
                else:
                    errmsg = "%s is not a valid genus 2 curve or isogeny class label"
        flash_error (errmsg, jump)
        return redirect(url_for(".index"))

    if info.get('download','').strip() == '1':
        return download_search(info)

    info["st_group_list"] = st_group_list
    info["st_group_dict"] = st_group_dict
    info["real_geom_end_alg_list"] = real_geom_end_alg_list
    info["real_geom_end_alg_to_ST0_dict"] = real_geom_end_alg_to_ST0_dict
    info["aut_grp_list"] = aut_grp_list
    info["aut_grp_dict"] = aut_grp_dict
    info["geom_aut_grp_list"] = geom_aut_grp_list
    info["geom_aut_grp_dict"] = geom_aut_grp_dict
    bread = info.get('bread',(('Genus 2 Curves', url_for(".index")), ('$\Q$', url_for(".index_Q")), ('Search Results', '.')))

    query = {}
    try:
        parse_ints(info,query,'abs_disc','absolute discriminant')
        parse_bool(info,query,'is_gl2_type','is of GL2-type')
        parse_bool(info,query,'has_square_sha','has square Sha')
        parse_bool(info,query,'locally_solvable','is locally solvable')
        parse_bool(info,query,'is_simple_geom','is geometrically simple')
        parse_ints(info,query,'cond','conductor')
        parse_ints(info,query,'num_rat_wpts','rational Weierstrass points')
        parse_bracketed_posints(info, query, 'torsion', 'torsion structure', maxlength=4,check_divisibility="increasing")
        parse_ints(info,query,'torsion_order','torsion order')
        if 'torsion' in query and not 'torsion_order' in query:
            query['torsion_order'] = reduce(mul,[int(n) for n in query['torsion']],1)
        if 'torsion' in query:
            query['torsion_subgroup'] = str(query['torsion']).replace(" ","")
            query.pop('torsion') # search using string key, not array of ints
        parse_ints(info,query,'two_selmer_rank','2-Selmer rank')
        parse_ints(info,query,'analytic_rank','analytic rank')
        # G2 invariants and drop-list items don't require parsing -- they are all strings (supplied by us, not the user)
        if 'g20' in info and 'g21' in info and 'g22' in info:
            query['g2_inv'] = "['%s','%s','%s']"%(info['g20'], info['g21'], info['g22'])
        if 'class' in info:
            query['class'] = info['class']
        for fld in ('st_group', 'real_geom_end_alg', 'aut_grp_id', 'geom_aut_grp_id'):
            if info.get(fld): query[fld] = info[fld]
    except ValueError as err:
        info['err'] = str(err)
        return render_template("g2c_search_results.html", info=info, title='Genus 2 Curves Search Input Error', bread=bread, credit=credit_string)
    # Database query happens here
    info["query"] = query # save query for reuse in download_search
    cursor = g2c_db_curves().find(query, {'_id':False, 'label':True, 'eqn':True, 'st_group':True, 'is_gl2_type':True, 'is_simple_geom':True, 'analytic_rank':True})

    count = parse_count(info, 50)
    start = parse_start(info)
    nres = cursor.count()
    if(start >= nres):
        start -= (1 + (start - nres) / count) * count
    if(start < 0):
        start = 0

    res = cursor.sort([("cond", ASCENDING), ("class", ASCENDING),  ("disc_key", ASCENDING),  ("label", ASCENDING)]).skip(start).limit(count)
    nres = res.count()

    if nres == 1:
        info["report"] = "unique match"
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    res_clean = []

    for v in res:
        v_clean = {}
        v_clean["label"] = v["label"]
        v_clean["class"] = class_from_curve_label(v["label"])
        v_clean["is_gl2_type"] = v["is_gl2_type"] 
        v_clean["is_simple_geom"] = v["is_simple_geom"] 
        v_clean["equation_formatted"] = list_to_min_eqn(literal_eval(v["eqn"]))
        v_clean["st_group_link"] = st_link_by_name(1,4,v['st_group'])
        v_clean["analytic_rank"] = v["analytic_rank"]
        res_clean.append(v_clean)

    info["curves"] = res_clean
    info["curve_url"] = lambda label: url_for_curve_label(label)
    info["class_url"] = lambda label: url_for_isogeny_class_label(label)
    info["start"] = start
    info["count"] = count
    info["more"] = int(start+count<nres)
    
    title = info.get('title','Genus 2 Curve search results')
    credit = credit_string
    
    return render_template("g2c_search_results.html", info=info, credit=credit,learnmore=learnmore_list(), bread=bread, title=title)
Code example #23
0
File: genus2_curve.py Project: nmascot/lmfdb
def genus2_curve_search(**args):
    info = to_dict(args)
    print "info", info
    info["st_group_list"] = st_group_list
    info["st_group_dict"] = st_group_dict
    info["real_geom_end_alg_list"] = real_geom_end_alg_list
    info["real_geom_end_alg_to_ST0_dict"] = real_geom_end_alg_to_ST0_dict
    info["aut_grp_list"] = aut_grp_list
    info["aut_grp_dict"] = aut_grp_dict
    info["geom_aut_grp_list"] = geom_aut_grp_list
    info["geom_aut_grp_dict"] = geom_aut_grp_dict
    query = {}  # database callable
    bread = [('Genus 2 Curves', url_for(".index")),
             ('$\Q$', url_for(".index_Q")),
             ('Search Results', '.')]
    #if 'SearchAgain' in args:
    #    return rational_genus2_curves()

    if 'jump' in args:
        label_regex = re.compile(r'\d+\.[a-z]+\.\d+\.\d+')
        if label_regex.match(info["jump"].strip()):
            data = render_curve_webpage_by_label(info["jump"].strip())
        else:
            data = "Invalid label"
        print data
        if data == "Invalid label":
            flash(Markup("The label <span style='color:black'>%s</span> is invalid."%(info["jump"])),"error")
            return redirect(url_for(".index"))
        if data == "Data for curve not found":
            flash(Markup("No genus 2 curve with label <span style='color:black'>%s</span> was found in the database."%(info["jump"])),"error")
            return redirect(url_for(".index"))
        return data
    try:
        parse_ints(info,query,'abs_disc','absolute discriminant')
        parse_bool(info,query,'is_gl2_type')
        parse_bool(info,query,'has_square_sha')
        parse_bool(info,query,'locally_solvable')
        for fld in ('st_group', 'real_geom_end_alg'):
            if info.get(fld): query[fld] = info[fld]
        for fld in ('aut_grp', 'geom_aut_grp'):
            #Encoded into a GAP ID.
            parse_bracketed_posints(info,query,fld,exactlength=2)
        # igusa and igusa_clebsch invariants not currently searchable
        parse_bracketed_posints(info, query, 'torsion', 'torsion structure', maxlength=4,check_divisibility="increasing")
        parse_ints(info,query,'cond','conductor')
        parse_ints(info,query,'num_rat_wpts','Weierstrass points')
        parse_ints(info,query,'torsion_order')
        parse_ints(info,query,'two_selmer_rank','2-Selmer rank')
        parse_ints(info,query,'analytic_rank','analytic rank')
    except ValueError as err:
        info['err'] = str(err)
        return render_template("search_results_g2.html", info=info, title='Genus 2 Curves Search Input Error', bread=bread, credit=credit_string)

    info["query"] = dict(query)
    print "query", info["query"]
    print "info", info
    count = parse_count(info, 50)
    start = parse_start(info)
    cursor = db_g2c().curves.find(query)
    nres = cursor.count()
    if(start >= nres):
        start -= (1 + (start - nres) / count) * count
    if(start < 0):
        start = 0

    res = cursor.sort([("cond", pymongo.ASCENDING),
                       ("class", pymongo.ASCENDING),
                       ("disc_key", pymongo.ASCENDING),
                       ("label", pymongo.ASCENDING)]).skip(start).limit(count)
    nres = res.count()
    if nres == 1:
        info["report"] = "unique match"
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1,
                    min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    res_clean = []

    for v in res:
        v_clean = {}
        v_clean["label"] = v["label"]
        v_clean["isog_label"] = v["class"]
        isogeny_class = db_g2c().isogeny_classes.find_one({'label' :
            isog_label(v["label"])})
        v_clean["is_gl2_type"] = isogeny_class["is_gl2_type"]
        if isogeny_class["is_gl2_type"] == True:
            v_clean["is_gl2_type_display"] = '&#10004;' #checkmark
        else:
            v_clean["is_gl2_type_display"] = ''
        v_clean["equation_formatted"] = list_to_min_eqn(v["min_eqn"])
        v_clean["st_group_name"] = st_group_name(isogeny_class['st_group'])
        v_clean["analytic_rank"] = v["analytic_rank"]
        res_clean.append(v_clean)

    info["curves"] = res_clean

    info["curve_url"] = lambda dbc: url_for_label(dbc['label'])
    info["isog_url"] = lambda dbc: isog_url_for_label(dbc['label'])
    info["start"] = start
    info["count"] = count
    info["more"] = int(start+count<nres)
    credit = credit_string
    title = 'Genus 2 Curves search results'
    return render_template("search_results_g2.html", info=info, credit=credit,learnmore=learnmore_list(),
            bread=bread, title=title)
    # unreachable: the function has already returned above
    credit = credit_string
    title = 'Genus 2 curves over $\Q$'
    bread = [('Genus 2 Curves', url_for(".index")), ('$\Q$', ' ')]
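
The jump handling above relies on a label regex; note that the dots need to be escaped (as in the corrected pattern), otherwise the pattern also accepts malformed input, because an unescaped dot matches any character. A quick standalone check:

import re

# escaped dots: matches only genuine curve labels such as 169.a.169.1
CURVE_LABEL_RE = re.compile(r'^\d+\.[a-z]+\.\d+\.\d+$')

assert CURVE_LABEL_RE.match('169.a.169.1')
assert not CURVE_LABEL_RE.match('169xa.169.1')
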
Code example #24
0
File: number_field.py Project: riiduan/lmfdb
def number_field_search(**args):
    info = to_dict(args)

    info['learnmore'] = [('Global number field labels', url_for(".render_labels_page")), ('Galois group labels', url_for(".render_groups_page")), (Completename, url_for(".render_discriminants_page")), ('Quadratic imaginary class groups', url_for(".render_class_group_data"))]
    t = 'Global Number Field search results'
    bread = [('Global Number Fields', url_for(".number_field_render_webpage")), ('Search results', ' ')]

    # for k in info.keys():
    #  nf_logger.debug(str(k) + ' ---> ' + str(info[k]))
    # nf_logger.debug('******************* '+ str(info['search']))

    if 'natural' in info:
        query = {'label_orig': info['natural']}
        try:
            parse_nf_string(info,query,'natural',name="Label",qfield='label')
            return redirect(url_for(".by_label", label= clean_input(query['label'])))
        except ValueError:
            query['err'] = info['err']
            return search_input_error(query, bread)

    query = {}
    try:
        parse_galgrp(info,query, qfield='galois')
        parse_ints(info,query,'degree')
        parse_bracketed_posints(info,query,'signature',split=False,exactlength=2)
        parse_signed_ints(info,query,'discriminant',qfield=('disc_sign','disc_abs_key'),parse_one=make_disc_key)
        parse_ints(info,query,'class_number')
        parse_bracketed_posints(info,query,'class_group',split=False,check_divisibility='increasing')
        parse_primes(info,query,'ur_primes',name='Unramified primes',qfield='ramps',mode='complement',to_string=True)
        if 'ram_quantifier' in info and str(info['ram_quantifier']) == 'some':
            mode = 'append'
        else:
            mode = 'exact'
        parse_primes(info,query,'ram_primes','ramified primes','ramps',mode,to_string=True)
    except ValueError:
        return search_input_error(info, bread)
    count = parse_count(info)
    start = parse_start(info)

    if info.get('paging'):
        try:
            paging = int(info['paging'])
            if paging == 0:
                start = 0
        except:
            pass

    C = base.getDBConnection()
    # nf_logger.debug(query)
    info['query'] = dict(query)
    if 'lucky' in args:
        one = C.numberfields.fields.find_one(query)
        if one:
            label = one['label']
            return redirect(url_for(".by_label", label=clean_input(label)))

    fields = C.numberfields.fields

    res = fields.find(query)

    if 'download' in info and info['download'] != '0':
        return download_search(info, res)

    res = res.sort([('degree', ASC), ('disc_abs_key', ASC),('disc_sign', ASC)])
    nres = res.count()

    # clamp the start offset into range before paging the cursor
    if(start >= nres):
        start -= (1 + (start - nres) / count) * count
    if(start < 0):
        start = 0
    res = res.skip(start).limit(count)

    info['fields'] = res
    info['number'] = nres
    info['start'] = start
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres

    info['wnf'] = WebNumberField.from_data
    return render_template("number_field_search.html", info=info, title=t, bread=bread)
Code example #25
0
File: number_field.py Project: haraldschilly/lmfdb
def number_field_search(info):

    info['learnmore'] = [('Global number field labels', url_for(".render_labels_page")), ('Galois group labels', url_for(".render_groups_page")), (Completename, url_for(".render_discriminants_page")), ('Quadratic imaginary class groups', url_for(".render_class_group_data"))]
    t = 'Global Number Field search results'
    bread = [('Global Number Fields', url_for(".number_field_render_webpage")), ('Search Results', ' ')]

    if 'natural' in info:
        query = {'label_orig': info['natural']}
        try:
            parse_nf_string(info,query,'natural',name="Label",qfield='label')
            return redirect(url_for(".by_label", label=query['label']))
        except ValueError:
            query['err'] = info['err']
            return search_input_error(query, bread)

    if 'algebra' in info:
        fields=info['algebra'].split('_')
        fields2=[WebNumberField.from_coeffs(a) for a in fields]
        for j in range(len(fields)):
            if fields2[j] is None:
                fields2[j] = WebNumberField.fakenf(fields[j])
        t = 'Number field algebra'
        info = {'fields': fields2}
        return render_template("number_field_algebra.html", info=info, title=t, bread=bread)



    query = {}
    try:
        parse_galgrp(info,query, qfield='galois')
        parse_ints(info,query,'degree')
        parse_bracketed_posints(info,query,'signature',split=False,exactlength=2)
        parse_signed_ints(info,query,'discriminant',qfield=('disc_sign','disc_abs_key'),parse_one=make_disc_key)
        parse_ints(info,query,'class_number')
        parse_bracketed_posints(info,query,'class_group',split=False,check_divisibility='increasing')
        parse_primes(info,query,'ur_primes',name='Unramified primes',qfield='ramps',mode='complement',to_string=True)
        # modes are now contained (in), exactly, include
        if 'ram_quantifier' in info and str(info['ram_quantifier']) == 'include':
            mode = 'append'
            parse_primes(info,query,'ram_primes','ramified primes','ramps',mode,to_string=True)
        elif 'ram_quantifier' in info and str(info['ram_quantifier']) == 'contained':
            parse_primes(info,query,'ram_primes','ramified primes','ramps_all','subsets',to_string=False)
            pass # build list
        else:
            mode = 'liststring'
            parse_primes(info,query,'ram_primes','ramified primes','ramps_all',mode)
    except ValueError:
        return search_input_error(info, bread)
    count = parse_count(info)
    start = parse_start(info)

    # nf_logger.debug(query)
    info['query'] = dict(query)
    if 'lucky' in info:
        one = nfdb().find_one(query)
        if one:
            label = one['label']
            return redirect(url_for(".by_label", label=clean_input(label)))

    fields = nfdb()

    res = fields.find(query)
    res = res.sort([('degree', ASC), ('disc_abs_key', ASC),('disc_sign', ASC)])

    if 'download' in info and info['download'] != '0':
        return download_search(info, res)

    # equivalent to
    # nres = res.count()
    #if(start >= nres):
    #    start -= (1 + (start - nres) / count) * count
    #if(start < 0):
    #    start = 0
    # res = res.skip(start).limit(count)
    try:
        start, nres, res = search_cursor_timeout_decorator(res, start, count)
    except ValueError as err:
        info['err'] = str(err)
        return search_input_error(info, bread)


    info['fields'] = res
    info['number'] = nres
    info['start'] = start
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres

    info['wnf'] = WebNumberField.from_data
    return render_template("number_field_search.html", info=info, title=t, bread=bread)
Code example #26
0
File: main.py Project: haraldschilly/lmfdb
def higher_genus_w_automorphisms_search(**args):
    info = to_dict(args)
    bread = get_bread([("Search Results",'')])
    C = base.getDBConnection()
    query = {}
    if 'jump_to' in info:
        labs = info['jump_to']
        if label_is_one_passport(labs):
            return render_passport({'passport_label': labs})
        elif label_is_one_family(labs):
            return render_family({'label': labs})
        else:
            flash_error ("The label %s is not a legitimate label for this data.",labs)
            return redirect(url_for(".index"))

    #allow for ; in signature
    if info.get('signature'):
        info['signature'] = info['signature'].replace(';',',')

    try:
        parse_gap_id(info,query,'group','Group')
        parse_ints(info,query,'genus',name='Genus')
        parse_bracketed_posints(info,query,'signature',split=False,name='Signature',keepbrackets=True)
        if query.get('signature'):
            query['signature'] = info['signature'] = str(sort_sign(ast.literal_eval(query['signature']))).replace(' ','')
        parse_ints(info,query,'dim',name='Dimension of the family')
        if 'inc_hyper' in info:
            if info['inc_hyper'] == 'exclude':
                query['hyperelliptic'] = False
            elif info['inc_hyper'] == 'only':
                query['hyperelliptic'] = True
        if 'inc_cyc_trig' in info:
            if info['inc_cyc_trig'] == 'exclude':
                query['cyclic_trigonal'] = False
            elif info['inc_cyc_trig'] == 'only':
                query['cyclic_trigonal'] = True
        if 'inc_full' in info:
            if info['inc_full'] == 'exclude':
                query['full_auto'] = {'$exists': True}
            elif info['inc_full'] == 'only':
                query['full_auto'] = {'$exists': False}

        query['cc.1'] = 1

    except ValueError:
        return search_input_error(info, bread)
    count = parse_count(info)
    start = parse_start(info)

    if 'groupsize' in info and info['groupsize'] != '':
        err, result = add_group_order_range(query, info['groupsize'], C)
        if err != None:
            flash_error('Parse error on group order field. <font face="Courier New"><br />Given: ' + err + '<br />-------' + result + '</font>')
    res = C.curve_automorphisms.passports.find(query).sort([(
         'genus', pymongo.ASCENDING), ('dim', pymongo.ASCENDING),
        ('cc'[0],pymongo.ASCENDING)])

    nres = res.count()

    # clamp the start offset into range before paging the cursor
    if(start >= nres):
        start -= (1 + (start - nres) / count) * count
    if(start < 0):
        start = 0
    res = res.skip(start).limit(count)

    L = [ ]
    for field in res:
        field['signature'] = ast.literal_eval(field['signature'])
        L.append(field)

    if 'download_magma' in info:
        return hgcwa_code_download_search(L,'magma')  #OR RES??????

    elif 'download_gap' in info:
        return hgcwa_code_download_search(L,'gap')  #OR L??????

    info['fields'] = L    
    info['number'] = nres
    info['group_display'] = sg_pretty
    info['show_downloads'] = len(L) > 0

    info['sign_display'] = sign_display
    info['start'] = start
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(
                               nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres

    return render_template("hgcwa-search.html", info=info, title="Families of Higher Genus Curves with Automorphisms Search Result", credit=credit, bread=bread)
Code example #27
0
File: genus2_curve.py Project: nilsskoruppa/lmfdb
def genus2_curve_search(**args):
    info = to_dict(args)

    if 'download' in info and info['download'] == '1':
        return download_search(info)

    info["st_group_list"] = st_group_list
    info["st_group_dict"] = st_group_dict
    info["real_geom_end_alg_list"] = real_geom_end_alg_list
    info["real_geom_end_alg_to_ST0_dict"] = real_geom_end_alg_to_ST0_dict
    info["aut_grp_list"] = aut_grp_list
    info["aut_grp_dict"] = aut_grp_dict
    info["geom_aut_grp_list"] = geom_aut_grp_list
    info["geom_aut_grp_dict"] = geom_aut_grp_dict
    query = {}  # database callable
    bread = [('Genus 2 Curves', url_for(".index")),
             ('$\Q$', url_for(".index_Q")), ('Search Results', '.')]
    #if 'SearchAgain' in args:
    #    return rational_genus2_curves()

    if 'jump' in args:
        label_regex = re.compile(r'\d+\.[a-z]+\.\d+\.\d+')
        if label_regex.match(info["jump"].strip()):
            data = render_curve_webpage_by_label(info["jump"].strip())
        else:
            data = "Invalid label"
        if data == "Invalid label":
            flash(
                Markup(
                    "The label <span style='color:black'>%s</span> is invalid."
                    % (info["jump"])), "error")
            return redirect(url_for(".index"))
        if data == "Data for curve not found":
            flash(
                Markup(
                    "No genus 2 curve with label <span style='color:black'>%s</span> was found in the database."
                    % (info["jump"])), "error")
            return redirect(url_for(".index"))
        return data
    try:
        parse_ints(info, query, 'abs_disc', 'absolute discriminant')
        parse_bool(info, query, 'is_gl2_type')
        parse_bool(info, query, 'has_square_sha')
        parse_bool(info, query, 'locally_solvable')
        for fld in ('st_group', 'real_geom_end_alg'):
            if info.get(fld): query[fld] = info[fld]
        for fld in ('aut_grp', 'geom_aut_grp'):
            parse_bracketed_posints(info, query, fld,
                                    exactlength=2)  #Encoded into a GAP ID.
        # igusa and igusa_clebsch invariants not currently searchable
        parse_bracketed_posints(info,
                                query,
                                'torsion',
                                'torsion structure',
                                maxlength=4,
                                check_divisibility="increasing")
        parse_ints(info, query, 'cond', 'conductor')
        parse_ints(info, query, 'num_rat_wpts', 'Weierstrass points')
        parse_ints(info, query, 'torsion_order')
        parse_ints(info, query, 'two_selmer_rank', '2-Selmer rank')
        parse_ints(info, query, 'analytic_rank', 'analytic rank')
    except ValueError as err:
        info['err'] = str(err)
        return render_template("search_results_g2.html",
                               info=info,
                               title='Genus 2 Curves Search Input Error',
                               bread=bread,
                               credit=credit_string)

    info["query"] = dict(query)
    count = parse_count(info, 50)
    start = parse_start(info)
    cursor = db_g2c().curves.find(query)
    nres = cursor.count()
    if (start >= nres):
        start -= (1 + (start - nres) / count) * count
    if (start < 0):
        start = 0

    res = cursor.sort([("cond", pymongo.ASCENDING),
                       ("class", pymongo.ASCENDING),
                       ("disc_key", pymongo.ASCENDING),
                       ("label", pymongo.ASCENDING)]).skip(start).limit(count)
    nres = res.count()

    if nres == 1:
        info["report"] = "unique match"
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (
                start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    res_clean = []

    for v in res:
        v_clean = {}
        v_clean["label"] = v["label"]
        v_clean["isog_label"] = v["class"]
        isogeny_class = db_g2c().isogeny_classes.find_one(
            {'label': isog_label(v["label"])})
        v_clean["is_gl2_type"] = isogeny_class["is_gl2_type"]
        if isogeny_class["is_gl2_type"] == True:
            v_clean["is_gl2_type_display"] = '&#10004;'  #checkmark
        else:
            v_clean["is_gl2_type_display"] = ''
        v_clean["equation_formatted"] = list_to_min_eqn(v["min_eqn"])
        v_clean["st_group_name"] = st_group_name(isogeny_class['st_group'])
        v_clean["analytic_rank"] = v["analytic_rank"]
        res_clean.append(v_clean)

    info["curves"] = res_clean
    info["curve_url"] = lambda dbc: url_for_label(dbc['label'])
    info["isog_url"] = lambda dbc: isog_url_for_label(dbc['label'])
    info["start"] = start
    info["count"] = count
    info["more"] = int(start + count < nres)

    credit = credit_string
    title = 'Genus 2 Curves search results'
    return render_template("search_results_g2.html",
                           info=info,
                           credit=credit,
                           learnmore=learnmore_list(),
                           bread=bread,
                           title=title)
Code example #28
0
File: main.py Project: praveenmunagapati/lmfdb
def galois_group_search(**args):
    info = to_dict(args)
    if info.get('jump_to'):
        return redirect(url_for('.by_label', label=info['jump_to']).strip(), 301)
    bread = get_bread([("Search Results", ' ')])
    C = base.getDBConnection()
    query = {}

    def includes_composite(s):
        s = s.replace(' ','').replace('..','-')
        for interval in s.split(','):
            if '-' in interval[1:]:
                ix = interval.index('-',1)
                a,b = ZZ(interval[:ix]), ZZ(interval[ix+1:]) # ZZ (not int) so that .is_prime() below works
                if b == a:
                    if a != 1 and not a.is_prime():
                        return True
                if b > a and b > 3:
                    return True
            else:
                a = ZZ(interval)
                if a != 1 and not a.is_prime():
                    return True
    try:
        parse_ints(info,query,'n','degree')
        parse_ints(info,query,'t')
        parse_ints(info,query,'order', qfield='orderkey', parse_singleton=make_order_key)
        parse_bracketed_posints(info, query, qfield='gapidfull', split=False, exactlength=2, keepbrackets=True, name='GAP id', field='gapid')
        for param in ('cyc', 'solv', 'prim', 'parity'):
            parse_bool(info,query,param,minus_one_to_zero=(param != 'parity'))
        degree_str = prep_ranges(info.get('n'))
        info['show_subs'] = degree_str is None or (LIST_RE.match(degree_str) and includes_composite(degree_str))
    except ValueError as err:
        info['err'] = str(err)
        return search_input_error(info, bread)

    count = parse_count(info, 50)
    start = parse_start(info)

    if 'orderkey' in query and not ('n' in query):
        res = C.transitivegroups.groups.find(query).sort([('orderkey', pymongo.ASCENDING), ('gapid', pymongo.ASCENDING), ('n', pymongo.ASCENDING), ('t', pymongo.ASCENDING)])
    else:
        res = C.transitivegroups.groups.find(query).sort([('n', pymongo.ASCENDING), ('t', pymongo.ASCENDING)])
    nres = res.count()

    # clamp the start offset into range before paging the cursor
    if(start >= nres):
        start -= (1 + (start - nres) / count) * count
    if(start < 0):
        start = 0
    res = res.skip(start).limit(count)

    info['groups'] = res
    info['group_display'] = group_display_prettyC(C)
    info['report'] = "found %s groups" % nres
    info['yesno'] = yesno
    info['wgg'] = WebGaloisGroup.from_data
    info['start'] = start
    info['number'] = nres
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres

    return render_template("gg-search.html", info=info, title="Galois Group Search Result", bread=bread, credit=GG_credit)
Code example #29
0
File: elliptic_curve.py Project: nilsskoruppa/lmfdb
def elliptic_curve_search(**args):
    info = to_dict(args)

    if "download" in info and info["download"] != "0":
        return download_search(info)

    query = {}
    bread = [
        ("Elliptic Curves", url_for("ecnf.index")),
        ("$\Q$", url_for(".rational_elliptic_curves")),
        ("Search Results", "."),
    ]
    if "SearchAgain" in args:
        return rational_elliptic_curves()

    if "jump" in args:
        label = info.get("label", "").replace(" ", "")
        m = match_lmfdb_label(label)
        if m:
            try:
                return by_ec_label(label)
            except ValueError:
                return elliptic_curve_jump_error(label, info, wellformed_label=True)
        elif label.startswith("Cremona:"):
            label = label[8:]
            m = match_cremona_label(label)
            if m:
                try:
                    return by_ec_label(label)
                except ValueError:
                    return elliptic_curve_jump_error(label, info, wellformed_label=True)
        elif match_cremona_label(label):
            return elliptic_curve_jump_error(label, info, cremona_label=True)
        elif label:
            # Try to parse a string like [1,0,3,2,4] as valid
            # Weierstrass coefficients:
            lab = re.sub(r"\s", "", label)
            lab = re.sub(r"^\[", "", lab)
            lab = re.sub(r"]$", "", lab)
            try:
                labvec = lab.split(",")
                labvec = [QQ(str(z)) for z in labvec]  # Rationals allowed
                E = EllipticCurve(labvec)
                # Now we do have a valid curve over Q, but it might
                # not be in the database.
                ainvs = [str(c) for c in E.minimal_model().ainvs()]
                data = db_ec().find_one({"ainvs": ainvs})
                if data is None:
                    info["conductor"] = E.conductor()
                    return elliptic_curve_jump_error(label, info, missing_curve=True)
                return by_ec_label(data["lmfdb_label"])
            except (TypeError, ValueError, ArithmeticError):
                return elliptic_curve_jump_error(label, info)
        else:
            query["label"] = ""

    try:
        parse_rational(info, query, "jinv", "j-invariant")
        parse_ints(info, query, "conductor")
        parse_ints(info, query, "torsion", "torsion order")
        parse_ints(info, query, "rank")
        parse_ints(info, query, "sha", "analytic order of &#1064;")
        parse_bracketed_posints(
            info, query, "torsion_structure", maxlength=2, process=str, check_divisibility="increasing"
        )
        parse_primes(
            info, query, "surj_primes", name="surjective primes", qfield="non-surjective_primes", mode="complement"
        )
        if info.get("surj_quantifier") == "exactly":
            mode = "exact"
        else:
            mode = "append"
        parse_primes(
            info, query, "nonsurj_primes", name="non-surjective primes", qfield="non-surjective_primes", mode=mode
        )
    except ValueError as err:
        info["err"] = str(err)
        return search_input_error(info, bread)

    count = parse_count(info, 100)
    start = parse_start(info)

    if "optimal" in info and info["optimal"] == "on":
        # fails on 990h3
        query["number"] = 1

    info["query"] = query
    cursor = db_ec().find(query)
    nres = cursor.count()
    if start >= nres:
        start -= (1 + (start - nres) / count) * count
    if start < 0:
        start = 0
    res = (
        cursor.sort([("conductor", ASCENDING), ("iso_nlabel", ASCENDING), ("lmfdb_number", ASCENDING)])
        .skip(start)
        .limit(count)
    )
    info["curves"] = res
    info["format_ainvs"] = format_ainvs
    info["curve_url"] = lambda dbc: url_for(
        ".by_triple_label",
        conductor=dbc["conductor"],
        iso_label=split_lmfdb_label(dbc["lmfdb_iso"])[1],
        number=dbc["lmfdb_number"],
    )
    info["iso_url"] = lambda dbc: url_for(
        ".by_double_iso_label", conductor=dbc["conductor"], iso_label=split_lmfdb_label(dbc["lmfdb_iso"])[1]
    )
    info["number"] = nres
    info["start"] = start
    info["count"] = count
    info["more"] = int(start + count < nres)

    if nres == 1:
        info["report"] = "unique match"
    elif nres == 2:
        info["report"] = "displaying both matches"
    else:
        if nres > count or start != 0:
            info["report"] = "displaying matches %s-%s of %s" % (start + 1, min(nres, start + count), nres)
        else:
            info["report"] = "displaying all %s matches" % nres
    credit = "John Cremona"
    if "non-surjective_primes" in query:
        credit += "and Andrew Sutherland"
    t = "Elliptic Curves search results"
    return render_template("search_results.html", info=info, credit=credit, bread=bread, title=t)
Code example #30
0
File: elliptic_curve.py Project: ZhHong/lmfdb
def elliptic_curve_search(**args):
    info = to_dict(args)

    if 'download' in info and info['download'] != '0':
        return download_search(info)

    query = {}
    bread = [('Elliptic Curves', url_for("ecnf.index")),
             ('$\Q$', url_for(".rational_elliptic_curves")),
             ('Search Results', '.')]
    if 'SearchAgain' in args:
        return rational_elliptic_curves()

    if 'jump' in args:
        label = info.get('label', '').replace(" ", "")
        m = match_lmfdb_label(label)
        if m:
            try:
                return by_ec_label(label)
            except ValueError:
                return elliptic_curve_jump_error(label, info, wellformed_label=True)
        elif label.startswith("Cremona:"):
            label = label[8:]
            m = match_cremona_label(label)
            if m:
                try:
                    return by_ec_label(label)
                except ValueError:
                    return elliptic_curve_jump_error(label, info, wellformed_label=True)
        elif match_cremona_label(label):
            return elliptic_curve_jump_error(label, info, cremona_label=True)
        elif label:
            # Try to parse a string like [1,0,3,2,4] as valid
            # Weierstrass coefficients:
            lab = re.sub(r'\s','',label)
            lab = re.sub(r'^\[','',lab)
            lab = re.sub(r']$','',lab)
            try:
                labvec = lab.split(',')
                labvec = [QQ(str(z)) for z in labvec] # Rationals allowed
                E = EllipticCurve(labvec)
                # Now we do have a valid curve over Q, but it might
                # not be in the database.
                ainvs = [str(c) for c in E.minimal_model().ainvs()]
                data = db_ec().find_one({'ainvs': ainvs})
                if data is None:
                    info['conductor'] = E.conductor()
                    return elliptic_curve_jump_error(label, info, missing_curve=True)
                return by_ec_label(data['lmfdb_label'])
            except (TypeError, ValueError, ArithmeticError):
                return elliptic_curve_jump_error(label, info)
        else:
            query['label'] = ''

    try:
        parse_rational(info,query,'jinv','j-invariant')
        parse_ints(info,query,'conductor')
        parse_ints(info,query,'torsion','torsion order')
        parse_ints(info,query,'rank')
        parse_ints(info,query,'sha','analytic order of &#1064;')
        parse_bracketed_posints(info,query,'torsion_structure',maxlength=2,process=str,check_divisibility='increasing')
        if 'include_cm' in info:
            if info['include_cm'] == 'exclude':
                query['cm'] = 0
            elif info['include_cm'] == 'only':
                query['cm'] = {'$ne' : 0}

        parse_primes(info, query, 'surj_primes', name='surjective primes',
                     qfield='non-surjective_primes', mode='complement')
        if info.get('surj_quantifier') == 'exactly':
            mode = 'exact'
        else:
            mode = 'append'
        parse_primes(info, query, 'nonsurj_primes', name='non-surjective primes',
                     qfield='non-surjective_primes',mode=mode)
    except ValueError as err:
        info['err'] = str(err)
        return search_input_error(info, bread)

    count = parse_count(info,100)
    start = parse_start(info)

    if 'optimal' in info and info['optimal'] == 'on':
        # fails on 990h3
        query['number'] = 1

    info['query'] = query
    cursor = db_ec().find(query)
    nres = cursor.count()
    if(start >= nres):
        start -= (1 + (start - nres) / count) * count
    if(start < 0):
        start = 0
    res = cursor.sort([('conductor', ASCENDING), ('iso_nlabel', ASCENDING),
                       ('lmfdb_number', ASCENDING)]).skip(start).limit(count)
    info['curves'] = res
    info['format_ainvs'] = format_ainvs
    info['curve_url'] = lambda dbc: url_for(".by_triple_label", conductor=dbc['conductor'], iso_label=split_lmfdb_label(dbc['lmfdb_iso'])[1], number=dbc['lmfdb_number'])
    info['iso_url'] = lambda dbc: url_for(".by_double_iso_label", conductor=dbc['conductor'], iso_label=split_lmfdb_label(dbc['lmfdb_iso'])[1])
    info['number'] = nres
    info['start'] = start
    info['count'] = count
    info['more'] = int(start + count < nres)

    
    if nres == 1:
        info['report'] = 'unique match'
    elif nres == 2: 
        info['report'] = 'displaying both matches'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    credit = 'John Cremona'
    if 'non-surjective_primes' in query:
        credit += ' and Andrew Sutherland'
    t = 'Elliptic Curves search results'
    return render_template("search_results.html", info=info, credit=credit, bread=bread, title=t)
Code example #31
0
File: main.py Project: haraldschilly/lmfdb
def hgm_search(info):
    #info = to_dict(args)
    bread = get_bread([("Search Results", '')])
    query = {}
    queryab = {}
    queryabrev = {}
    if 'jump_to' in info:
        label = clean_input(info['jump_to'])
        if HGM_LABEL_RE.match(label):
            return render_hgm_webpage(normalize_motive(label))
        if HGM_FAMILY_LABEL_RE.match(label):
            return render_hgm_family_webpage(normalize_family(label))
        flash_error('%s is not a valid label for a hypergeometric motive or family of hypergeometric motives', label)
        return redirect(url_for(".index"))

    family_search = False
    if info.get('Submit Family') or info.get('family'):
        family_search = True

    # generic, irreducible not in DB yet

    try:
        parse_ints(info, query, 'degree')
        parse_ints(info, query, 'weight')
        parse_bracketed_posints(info, query, 'famhodge', 'family Hodge vector',split=False)
        parse_restricted(info, query, 'sign', allowed=['+1',1,-1], process=int)
        for param in ['A', 'B', 'A2', 'B2', 'A3', 'B3', 'A5', 'B5', 'A7', 'B7',
            'Au2', 'Bu2', 'Au3', 'Bu3', 'Au5', 'Bu5', 'Au7', 'Bu7']:
            parse_bracketed_posints(info, queryab, param, split=False,
                listprocess=lambda a: sorted(a, reverse=True))
        # Make a version to search reversed way
        if not family_search:
            parse_ints(info, query, 'conductor', 'Conductor' , 'cond')
            parse_rational(info, query, 't')
            parse_bracketed_posints(info, query, 'hodge', 'Hodge vector')
    except ValueError:
        if family_search:
            return render_template("hgm-search.html", info=info, title="Hypergeometric Family over $\Q$ Search Result", bread=bread, credit=HGM_credit, learnmore=learnmore_list())
        return render_template("hgm-search.html", info=info, title="Hypergeometric Motive over $\Q$ Search Result", bread=bread, credit=HGM_credit, learnmore=learnmore_list())

    # Now combine the parts of the query if there are A,B parts
    if queryab != {}:
        for k in queryab.keys():
            queryabrev[k+'rev'] = queryab[k]
        queryab.update(query)
        queryabrev.update(query)
        query = {'$or':[queryab, queryabrev]}
    print query
    count_default = 20
    if info.get('count'):
        try:
            count = int(info['count'])
        except:
            count = count_default
    else:
        count = count_default
    info['count'] = count

    start_default = 0
    if info.get('start'):
        try:
            start = int(info['start'])
            if(start < 0):
                start += (1 - (start + 1) / count) * count
        except:
            start = start_default
    else:
        start = start_default
    if info.get('paging'):
        try:
            paging = int(info['paging'])
            if paging == 0:
                start = 0
        except:
            pass

    # logger.debug(query)
    if family_search:
        #query['leader'] = '1'
        res = familydb().find(query).sort([('label', pymongo.ASCENDING)])
    else:
        res = motivedb().find(query).sort([('cond', pymongo.ASCENDING), ('label', pymongo.ASCENDING)])
    nres = res.count()

    # clamp the start offset into range before paging the cursor
    if(start >= nres):
        start -= (1 + (start - nres) / count) * count
    if(start < 0):
        start = 0
    res = res.skip(start).limit(count)

    info['motives'] = res
    info['number'] = nres
    info['start'] = start
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    info['make_label'] = make_abt_label
    info['make_t_label'] = make_t_label
    info['ab_label'] = ab_label
    info['display_t'] = display_t
    info['family'] = family_search
    info['factorint'] = factorint

    if family_search:
        return render_template("hgm-search.html", info=info, title="Hypergeometric Family over $\Q$ Search Result", bread=bread, credit=HGM_credit, learnmore=learnmore_list())
    else:
        return render_template("hgm-search.html", info=info, title="Hypergeometric Motive over $\Q$ Search Result", bread=bread, credit=HGM_credit, learnmore=learnmore_list())
Code example #32
0
File: main.py Project: nilsskoruppa/lmfdb
def elliptic_curve_search(**args):
    info = to_dict(args['data'])
    
    if 'download' in info and info['download'] != '0': # form values are strings, so compare with '0'
        return download_search(info)
    
    bread = [('Elliptic Curves', url_for(".index")),
             ('Search Results', '.')]
    if 'jump' in info:
        label = info.get('label', '').replace(" ", "")
        # This label should be a full isogeny class label or a full
        # curve label (including the field_label component)
        try:
            nf, cond_label, iso_label, number = split_full_label(label.strip())
        except ValueError:
            if not 'query' in info:
                info['query'] = {}
            info['err'] = ''
            return search_input_error(info, bread)

        return show_ecnf(nf, cond_label, iso_label, number)

    query = {}

    try:
        parse_ints(info,query,'conductor_norm')
        parse_noop(info,query,'conductor_label')
        parse_nf_string(info,query,'field',name="base number field",qfield='field_label')
        parse_nf_elt(info,query,'jinv',name='j-invariant')
        parse_ints(info,query,'torsion',name='Torsion order',qfield='torsion_order')
        parse_bracketed_posints(info,query,'torsion_structure',maxlength=2)
    except ValueError:
        return search_input_error(info, bread)

    if 'include_isogenous' in info and info['include_isogenous'] == 'off':
        info['number'] = 1
        query['number'] = 1

    if 'include_base_change' in info and info['include_base_change'] == 'off':
        query['base_change'] = []
    else:
        info['include_base_change'] = "on"

    info['query'] = query
    count = parse_count(info, 50)
    start = parse_start(info)

    # make the query and trim results according to start/count:

    cursor = db_ecnf().find(query)
    nres = cursor.count()
    if(start >= nres):
        start -= (1 + (start - nres) / count) * count
    if(start < 0):
        start = 0
    
    res = cursor.sort([('field_label', ASC), ('conductor_norm', ASC), ('conductor_label', ASC), ('iso_nlabel', ASC), ('number', ASC)]).skip(start).limit(count)

    res = list(res)
    for e in res:
        e['numb'] = str(e['number'])
        e['field_knowl'] = nf_display_knowl(e['field_label'], getDBConnection(), field_pretty(e['field_label']))

    info['curves'] = res  # [ECNF(e) for e in res]
    info['number'] = nres
    info['start'] = start
    info['count'] = count
    info['more'] = int(start + count < nres)
    info['field_pretty'] = field_pretty
    info['web_ainvs'] = web_ainvs
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    t = 'Elliptic Curve search results'
    return render_template("ecnf-search-results.html", info=info, credit=ecnf_credit, bread=bread, title=t)
Code example #33
0
File: number_field.py Project: jwj61/lmfdb
def number_field_search(**args):
    info = to_dict(args)

    info['learnmore'] = [('Global number field labels', url_for(".render_labels_page")), ('Galois group labels', url_for(".render_groups_page")), (Completename, url_for(".render_discriminants_page")), ('Quadratic imaginary class groups', url_for(".render_class_group_data"))]
    t = 'Global Number Field search results'
    bread = [('Global Number Fields', url_for(".number_field_render_webpage")), ('Search results', ' ')]

    # for k in info.keys():
    #  nf_logger.debug(str(k) + ' ---> ' + str(info[k]))
    # nf_logger.debug('******************* '+ str(info['search']))

    if 'natural' in info:
        query = {'label_orig': info['natural']}
        try:
            parse_nf_string(info,query,'natural',name="Label",qfield='label')
            return redirect(url_for(".by_label", label= clean_input(query['label_orig'])))
        except ValueError:
            query['err'] = info['err']
            return search_input_error(query, bread)

    query = {}
    try:
        parse_galgrp(info,query, qfield='galois')
        parse_ints(info,query,'degree')
        parse_bracketed_posints(info,query,'signature',split=False,exactlength=2)
        parse_signed_ints(info,query,'discriminant',qfield=('disc_sign','disc_abs_key'),parse_one=make_disc_key)
        parse_ints(info,query,'class_number')
        parse_bracketed_posints(info,query,'class_group',split=False,check_divisibility='increasing')
        parse_primes(info,query,'ur_primes',name='Unramified primes',qfield='ramps',mode='complement',to_string=True)
        # modes are now contained (in), exactly, include
        if 'ram_quantifier' in info and str(info['ram_quantifier']) == 'include':
            mode = 'append'
            parse_primes(info,query,'ram_primes','ramified primes','ramps',mode,to_string=True)
        elif 'ram_quantifier' in info and str(info['ram_quantifier']) == 'contained':
            parse_primes(info,query,'ram_primes','ramified primes','ramps_all','subsets',to_string=False)
            pass # build list
        else:
            mode = 'liststring'
            parse_primes(info,query,'ram_primes','ramified primes','ramps_all',mode)
    except ValueError:
        return search_input_error(info, bread)
    count = parse_count(info)
    start = parse_start(info)

    if info.get('paging'):
        try:
            paging = int(info['paging'])
            if paging == 0:
                start = 0
        except:
            pass

    C = base.getDBConnection()
    # nf_logger.debug(query)
    info['query'] = dict(query)
    if 'lucky' in args:
        one = C.numberfields.fields.find_one(query)
        if one:
            label = one['label']
            return redirect(url_for(".by_label", label=clean_input(label)))

    fields = C.numberfields.fields

    res = fields.find(query)
    res = res.sort([('degree', ASC), ('disc_abs_key', ASC),('disc_sign', ASC)])

    if 'download' in info and info['download'] != '0':
        return download_search(info, res)

    nres = res.count()

    # clamp the start offset into range before paging the cursor
    if(start >= nres):
        start -= (1 + (start - nres) / count) * count
    if(start < 0):
        start = 0
    res = res.skip(start).limit(count)

    info['fields'] = res
    info['number'] = nres
    info['start'] = start
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres

    info['wnf'] = WebNumberField.from_data
    return render_template("number_field_search.html", info=info, title=t, bread=bread)
Code example #34
0
File: genus2_curve.py Project: akoutsianas/lmfdb
def genus2_curve_search(**args):
    info = to_dict(args['data'])
    if 'jump' in info:
        jump = info["jump"].strip()
        curve_label_regex = re.compile(r'\d+\.[a-z]+\.\d+\.\d+$')
        if curve_label_regex.match(jump):
            return redirect(url_for_curve_label(jump), 301)
        else:
            class_label_regex = re.compile(r'\d+\.[a-z]+$')
            if class_label_regex.match(jump):
                return redirect(url_for_isogeny_class_label(jump), 301)
            else:
                # Handle direct Lhash input
                class_label_regex = re.compile(r'#\d+$')
                if class_label_regex.match(jump) and ZZ(jump[1:]) < 2**61:
                    c = g2cdb().isogeny_classes.find_one({'Lhash': jump[1:].strip()})
                    if c:
                        return redirect(url_for_isogeny_class_label(c["label"]), 301)
                    else:
                        errmsg = "Hash not found"
                else:
                    errmsg = "Invalid label"
        flash(Markup(errmsg + " <span style='color:black'>%s</span>"%(jump)),"error")
        return redirect(url_for(".index"))

    if 'download' in info and info['download'] == '1':
        return download_search(info)
    
    info["st_group_list"] = st_group_list
    info["st_group_dict"] = st_group_dict
    info["real_geom_end_alg_list"] = real_geom_end_alg_list
    info["real_geom_end_alg_to_ST0_dict"] = real_geom_end_alg_to_ST0_dict
    info["aut_grp_list"] = aut_grp_list
    info["aut_grp_dict"] = aut_grp_dict
    info["geom_aut_grp_list"] = geom_aut_grp_list
    info["geom_aut_grp_dict"] = geom_aut_grp_dict
    bread = info.get('bread',(('Genus 2 Curves', url_for(".index")), ('$\Q$', url_for(".index_Q")), ('Search Results', '.')))

    query = {}
    try:
        parse_ints(info,query,'abs_disc','absolute discriminant')
        parse_bool(info,query,'is_gl2_type')
        parse_bool(info,query,'has_square_sha')
        parse_bool(info,query,'locally_solvable')
        parse_bool(info,query,'is_simple_geom')
        parse_bracketed_posints(info, query, 'torsion', 'torsion structure', maxlength=4,check_divisibility="increasing")
        parse_ints(info,query,'cond')
        parse_ints(info,query,'num_rat_wpts','Weierstrass points')
        parse_ints(info,query,'torsion_order')
        if 'torsion' in query and not 'torsion_order' in query:
            query['torsion_order'] = reduce(mul,[int(n) for n in query['torsion']],1)
        parse_ints(info,query,'two_selmer_rank','2-Selmer rank')
        parse_ints(info,query,'analytic_rank','analytic rank')
        # G2 invariants and drop-list items don't require parsing -- they are all strings (supplied by us, not the user)
        if 'g20' in info and 'g21' in info and 'g22' in info:
            query['g2inv'] = [ info['g20'], info['g21'], info['g22'] ]
        if 'class' in info:
            query['class'] = info['class']
        for fld in ('st_group', 'real_geom_end_alg', 'aut_grp_id', 'geom_aut_grp_id'):
            if info.get(fld): query[fld] = info[fld]
    except ValueError as err:
        info['err'] = str(err)
        return render_template("search_results_g2.html", info=info, title='Genus 2 Curves Search Input Error', bread=bread, credit=credit_string)
    info["query"] = dict(query)
    
    # Database query happens here
    cursor = g2cdb().curves.find(query,{'_id':int(0),'label':int(1),'min_eqn':int(1),'st_group':int(1),'is_gl2_type':int(1),'analytic_rank':int(1)})

    count = parse_count(info, 50)
    start = parse_start(info)
    nres = cursor.count()
    if(start >= nres):
        start -= (1 + (start - nres) / count) * count
    if(start < 0):
        start = 0

    res = cursor.sort([("cond", ASCENDING), ("class", ASCENDING),  ("disc_key", ASCENDING),  ("label", ASCENDING)]).skip(start).limit(count)
    nres = res.count()

    if nres == 1:
        info["report"] = "unique match"
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    res_clean = []

    for v in res:
        v_clean = {}
        v_clean["label"] = v["label"]
        v_clean["class"] = class_from_curve_label(v["label"])
        v_clean["is_gl2_type"] = v["is_gl2_type"] 
        v_clean["is_gl2_type_display"] = '&#10004;' if v["is_gl2_type"] else '' # display checkmark if true, blank otherwise
        v_clean["equation_formatted"] = list_to_min_eqn(v["min_eqn"])
        v_clean["st_group_name"] = st_group_name(v['st_group'])
        v_clean["st_group_href"] = st_group_href(v['st_group'])
        v_clean["analytic_rank"] = v["analytic_rank"]
        res_clean.append(v_clean)

    info["curves"] = res_clean
    info["curve_url"] = lambda label: url_for_curve_label(label)
    info["class_url"] = lambda label: url_for_isogeny_class_label(label)
    info["start"] = start
    info["count"] = count
    info["more"] = int(start+count<nres)
    
    title = info.get('title','Genus 2 Curve search results')
    credit = credit_string
    
    return render_template("search_results_g2.html", info=info, credit=credit,learnmore=learnmore_list(), bread=bread, title=title)
Code example #35
0
File: genus2_curve.py Project: akoutsianas/lmfdb
def genus2_curve_search(**args):
    info = to_dict(args['data'])
    if 'jump' in info:
        jump = info["jump"].strip()
        curve_label_regex = re.compile(r'\d+\.[a-z]+\.\d+\.\d+$')
        if curve_label_regex.match(jump):
            return redirect(url_for_curve_label(jump), 301)
        else:
            class_label_regex = re.compile(r'\d+\.[a-z]+$')
            if class_label_regex.match(jump):
                return redirect(url_for_isogeny_class_label(jump), 301)
            else:
                # Handle direct Lhash input
                class_label_regex = re.compile(r'#\d+$')
                if class_label_regex.match(jump) and ZZ(jump[1:]) < 2**61:
                    c = g2cdb().isogeny_classes.find_one(
                        {'Lhash': jump[1:].strip()})
                    if c:
                        return redirect(
                            url_for_isogeny_class_label(c["label"]), 301)
                    else:
                        errmsg = "Hash not found"
                else:
                    errmsg = "Invalid label"
        flash(Markup(errmsg + " <span style='color:black'>%s</span>" % (jump)),
              "error")
        return redirect(url_for(".index"))

    if 'download' in info and info['download'] == '1':
        return download_search(info)

    info["st_group_list"] = st_group_list
    info["st_group_dict"] = st_group_dict
    info["real_geom_end_alg_list"] = real_geom_end_alg_list
    info["real_geom_end_alg_to_ST0_dict"] = real_geom_end_alg_to_ST0_dict
    info["aut_grp_list"] = aut_grp_list
    info["aut_grp_dict"] = aut_grp_dict
    info["geom_aut_grp_list"] = geom_aut_grp_list
    info["geom_aut_grp_dict"] = geom_aut_grp_dict
    bread = info.get('bread',
                     (('Genus 2 Curves', url_for(".index")),
                      ('$\Q$', url_for(".index_Q")), ('Search Results', '.')))

    query = {}
    try:
        parse_ints(info, query, 'abs_disc', 'absolute discriminant')
        parse_bool(info, query, 'is_gl2_type')
        parse_bool(info, query, 'has_square_sha')
        parse_bool(info, query, 'locally_solvable')
        parse_bool(info, query, 'is_simple_geom')
        parse_bracketed_posints(info,
                                query,
                                'torsion',
                                'torsion structure',
                                maxlength=4,
                                check_divisibility="increasing")
        parse_ints(info, query, 'cond')
        parse_ints(info, query, 'num_rat_wpts', 'Weierstrass points')
        parse_ints(info, query, 'torsion_order')
        if 'torsion' in query and not 'torsion_order' in query:
            query['torsion_order'] = reduce(mul,
                                            [int(n) for n in query['torsion']],
                                            1)
        parse_ints(info, query, 'two_selmer_rank', '2-Selmer rank')
        parse_ints(info, query, 'analytic_rank', 'analytic rank')
        # G2 invariants and drop-list items don't require parsing -- they are all strings (supplied by us, not the user)
        if 'g20' in info and 'g21' in info and 'g22' in info:
            query['g2inv'] = [info['g20'], info['g21'], info['g22']]
        if 'class' in info:
            query['class'] = info['class']
        for fld in ('st_group', 'real_geom_end_alg', 'aut_grp_id',
                    'geom_aut_grp_id'):
            if info.get(fld): query[fld] = info[fld]
    except ValueError as err:
        info['err'] = str(err)
        return render_template("search_results_g2.html",
                               info=info,
                               title='Genus 2 Curves Search Input Error',
                               bread=bread,
                               credit=credit_string)
    info["query"] = dict(query)

    # Database query happens here
    cursor = g2cdb().curves.find(
        query, {
            '_id': int(0),
            'label': int(1),
            'min_eqn': int(1),
            'st_group': int(1),
            'is_gl2_type': int(1),
            'analytic_rank': int(1)
        })

    count = parse_count(info, 50)
    start = parse_start(info)
    nres = cursor.count()
    if (start >= nres):
        start -= (1 + (start - nres) / count) * count
    if (start < 0):
        start = 0

    res = cursor.sort([("cond", ASCENDING), ("class", ASCENDING),
                       ("disc_key", ASCENDING),
                       ("label", ASCENDING)]).skip(start).limit(count)
    nres = res.count()

    if nres == 1:
        info["report"] = "unique match"
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (
                start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    res_clean = []

    for v in res:
        v_clean = {}
        v_clean["label"] = v["label"]
        v_clean["class"] = class_from_curve_label(v["label"])
        v_clean["is_gl2_type"] = v["is_gl2_type"]
        v_clean["is_gl2_type_display"] = '&#10004;' if v[
            "is_gl2_type"] else ''  # display checkmark if true, blank otherwise
        v_clean["equation_formatted"] = list_to_min_eqn(v["min_eqn"])
        v_clean["st_group_name"] = st_group_name(v['st_group'])
        v_clean["st_group_href"] = st_group_href(v['st_group'])
        v_clean["analytic_rank"] = v["analytic_rank"]
        res_clean.append(v_clean)

    info["curves"] = res_clean
    info["curve_url"] = lambda label: url_for_curve_label(label)
    info["class_url"] = lambda label: url_for_isogeny_class_label(label)
    info["start"] = start
    info["count"] = count
    info["more"] = int(start + count < nres)

    title = info.get('title', 'Genus 2 Curve search results')
    credit = credit_string

    return render_template("search_results_g2.html",
                           info=info,
                           credit=credit,
                           learnmore=learnmore_list(),
                           bread=bread,
                           title=title)
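
When a torsion structure is supplied but no torsion order, the listing above (like #36 and #40) falls back to the product of the entered invariant factors via `reduce(mul, ...)`. A self-contained sketch of that product with a made-up input:

from functools import reduce   # builtin in Python 2, in functools on Python 3
from operator import mul

torsion_structure = ['2', '4']   # hypothetical user input, e.g. Z/2 x Z/4
torsion_order = reduce(mul, [int(n) for n in torsion_structure], 1)
assert torsion_order == 8
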
コード例 #36
0
def genus2_curve_search(**args):
    info = to_dict(args)
    
    if 'download' in info and info['download'] == '1':
        return download_search(info)
    
    info["st_group_list"] = st_group_list
    info["st_group_dict"] = st_group_dict
    info["real_geom_end_alg_list"] = real_geom_end_alg_list
    info["real_geom_end_alg_to_ST0_dict"] = real_geom_end_alg_to_ST0_dict
    info["aut_grp_list"] = aut_grp_list
    info["aut_grp_dict"] = aut_grp_dict
    info["geom_aut_grp_list"] = geom_aut_grp_list
    info["geom_aut_grp_dict"] = geom_aut_grp_dict
    query = {}  # search constraints to pass to the database
    bread = [('Genus 2 Curves', url_for(".index")),
             ('$\Q$', url_for(".index_Q")),
             ('Search Results', '.')]
    #if 'SearchAgain' in args:
    #    return rational_genus2_curves()

    if 'jump' in args:
        curve_label_regex = re.compile(r'\d+\.[a-z]+\.\d+\.\d+$')
        if curve_label_regex.match(info["jump"].strip()):
            data = render_curve_webpage_by_label(info["jump"].strip())
        else:
            class_label_regex = re.compile(r'\d+\.[a-z]+$')
            if class_label_regex.match(info["jump"].strip()):
                data = render_isogeny_class(info["jump"].strip())
            else:
                class_label_regex = re.compile(r'#\d+$')
                if class_label_regex.match(info["jump"].strip()) and ZZ(info["jump"][1:]) < 2**61:
                    c = g2cdb().isogeny_classes.find_one({'hash':int(info["jump"][1:])})
                    if c:
                        data = render_isogeny_class(c["label"])
                    else:
                        data = "Hash not found"
                else:
                    data = "Invalid label"
        if isinstance(data,str):
            flash(Markup(data + " <span style='color:black'>%s</span>"%(info["jump"])),"error")
            return redirect(url_for(".index"))
        return data
    try:
        parse_ints(info,query,'abs_disc','absolute discriminant')
        parse_bool(info,query,'is_gl2_type')
        parse_bool(info,query,'has_square_sha')
        parse_bool(info,query,'locally_solvable')
        parse_bracketed_posints(info, query, 'torsion', 'torsion structure', maxlength=4,check_divisibility="increasing")
        parse_ints(info,query,'cond','conductor')
        parse_ints(info,query,'num_rat_wpts','Weierstrass points')
        parse_ints(info,query,'torsion_order')
        parse_ints(info,query,'two_selmer_rank','2-Selmer rank')
        parse_ints(info,query,'analytic_rank','analytic rank')
        # G2 invariants and drop-list items don't require parsing -- they are all strings (supplied by us, not the user)
        if info.get('g20') and info.get('g21') and info.get('g22'):
            query['g2inv'] = [ info['g20'], info['g21'], info['g22'] ]
        for fld in ('st_group', 'real_geom_end_alg', 'aut_grp_id', 'geom_aut_grp_id'):
            if info.get(fld): query[fld] = info[fld]
    except ValueError as err:
        info['err'] = str(err)
        return render_template("search_results_g2.html", info=info, title='Genus 2 Curves Search Input Error', bread=bread, credit=credit_string)
    info["query"] = dict(query)
    count = parse_count(info, 50)
    start = parse_start(info)
    cursor = g2cdb().curves.find(query)
    nres = cursor.count()
    if(start >= nres):
        start -= (1 + (start - nres) / count) * count
    if(start < 0):
        start = 0

    res = cursor.sort([("cond", pymongo.ASCENDING), ("class", pymongo.ASCENDING),  ("disc_key", pymongo.ASCENDING),  ("label", pymongo.ASCENDING)]).skip(start).limit(count)
    nres = res.count()

    if nres == 1:
        info["report"] = "unique match"
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    res_clean = []

    for v in res:
        v_clean = {}
        v_clean["label"] = v["label"]
        v_clean["isog_label"] = v["class"]
        isogeny_class = g2cdb().isogeny_classes.find_one({'label': isog_label(v["label"])})
        v_clean["is_gl2_type"] = isogeny_class["is_gl2_type"]
        if isogeny_class["is_gl2_type"] == True:
            v_clean["is_gl2_type_display"] = '&#10004;' #checkmark
        else:
            v_clean["is_gl2_type_display"] = ''
        v_clean["equation_formatted"] = list_to_min_eqn(v["min_eqn"])
        v_clean["st_group_name"] = st_group_name(isogeny_class['st_group'])
        v_clean["st_group_href"] = st_group_href(isogeny_class['st_group'])
        v_clean["analytic_rank"] = v["analytic_rank"]
        res_clean.append(v_clean)

    info["curves"] = res_clean
    info["curve_url"] = lambda dbc: url_for_label(dbc['label'])
    info["isog_url"] = lambda dbc: isog_url_for_label(dbc['label'])
    info["start"] = start
    info["count"] = count
    info["more"] = int(start+count<nres)
    
    credit = credit_string
    title = 'Genus 2 Curves search results'
    return render_template("search_results_g2.html", info=info, credit=credit,learnmore=learnmore_list(), bread=bread, title=title)
コード例 #37
0
def belyi_search(info):
    if 'jump' in info:
        jump = info["jump"].strip()
        if re.match(r'^\d+T\d+-\[\d+,\d+,\d+\]-\d+-\d+-\d+-g\d+-[a-z]+$',
                    jump):
            return redirect(url_for_belyi_galmap_label(jump), 301)
        else:
            if re.match(r'^\d+T\d+-\[\d+,\d+,\d+\]-\d+-\d+-\d+-g\d+$', jump):
                return redirect(url_for_belyi_passport_label(jump), 301)
            else:
                errmsg = "%s is not a valid Belyi map or passport label"
        flash_error(errmsg, jump)
        return redirect(url_for(".index"))
    if info.get('download', '').strip():
        return download_search(info)

    #search options
    info['geometry_types_list'] = geometry_types_list
    info['geometry_types_dict'] = geometry_types_dict

    bread = info.get('bread', (('Belyi Maps', url_for(".index")),
                               ('Search Results', '.')))

    query = {}
    try:
        if 'group' in query:
            info['group'] = query['group']
        parse_bracketed_posints(info,
                                query,
                                'abc_list',
                                'a, b, c',
                                maxlength=3)
        if query.get('abc_list'):
            if len(query['abc_list']) == 3:
                a, b, c = sorted(query['abc_list'])
                query['a_s'] = a
                query['b_s'] = b
                query['c_s'] = c
            elif len(query['abc_list']) == 2:
                a, b = sorted(query['abc_list'])
                sub_query = []
                sub_query.append({
                    'a_s': a,
                    'b_s': b
                })
                sub_query.append({
                    'b_s': a,
                    'c_s': b
                })
                query['$or'] = sub_query
            elif len(query['abc_list']) == 1:
                a = query['abc_list'][0]
                query['$or'] = [{
                    'a_s': a
                }, {
                    'b_s': a
                }, {
                    'c_s': a
                }]
            query.pop('abc_list')

        # a naive hack
        if info.get('abc'):
            for elt in ['a_s', 'b_s', 'c_s']:
                info_hack = {}
                info_hack[elt] = info['abc']
                parse_ints(info_hack, query, elt)

        parse_ints(info, query, 'g', 'g')
        parse_ints(info, query, 'deg', 'deg')
        parse_ints(info, query, 'orbit_size', 'orbit_size')
        # invariants and drop-list items don't require parsing -- they are all strings (supplied by us, not the user)
        for fld in ['geomtype', 'group']:
            if info.get(fld):
                query[fld] = info[fld]
    except ValueError as err:
        info['err'] = str(err)
        return render_template("belyi_search_results.html",
                               info=info,
                               title='Belyi Maps Search Input Error',
                               bread=bread,
                               credit=credit_string)

    # Database query happens here
    info["query"] = query  # save query for reuse in download_search
    cursor = belyi_db_galmaps().find(
        query, {
            '_id': False,
            'label': True,
            'group': True,
            'abc': True,
            'g': True,
            'deg': True,
            'geomtype': True,
            'orbit_size': True
        })

    count = parse_count(info, 50)
    start = parse_start(info)
    nres = cursor.count()
    if (start >= nres):
        start -= (1 + (start - nres) / count) * count
    if (start < 0):
        start = 0

    res = cursor.sort([("deg", ASCENDING), ("group_num", ASCENDING),
                       ("g", ASCENDING),
                       ("label", ASCENDING)]).skip(start).limit(count)
    nres = res.count()

    if nres == 1:
        info["report"] = "unique match"
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (
                start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    res_clean = []

    for v in res:
        v_clean = {}
        for key in ('label', 'group', 'deg', 'g', 'orbit_size'):
            v_clean[key] = v[key]
        v_clean['geomtype'] = geometry_types_dict[v['geomtype']]
        res_clean.append(v_clean)

    info["belyi_galmaps"] = res_clean
    info["belyi_galmap_url"] = lambda label: url_for_belyi_galmap_label(label)
    info["start"] = start
    info["count"] = count
    info["more"] = int(start + count < nres)

    title = info.get('title', 'Belyi map search results')
    credit = credit_string

    return render_template("belyi_search_results.html",
                           info=info,
                           credit=credit,
                           learnmore=learnmore_list(),
                           bread=bread,
                           title=title)
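
The `abc_list` handling in the Belyi search above sorts the entered orders and, when fewer than three are given, expands the query into an `$or` over the possible positions. A small sketch of that expansion as a pure function; the field names `a_s`, `b_s`, `c_s` are taken from the listing, and the two-element case mirrors the code, which only pairs (a_s, b_s) and (b_s, c_s):

def abc_query(abc_list):
    """Build the Mongo-style constraint used for a partially specified (a, b, c)."""
    vals = sorted(abc_list)
    if len(vals) == 3:
        a, b, c = vals
        return {'a_s': a, 'b_s': b, 'c_s': c}
    if len(vals) == 2:
        a, b = vals
        return {'$or': [{'a_s': a, 'b_s': b}, {'b_s': a, 'c_s': b}]}
    if len(vals) == 1:
        a = vals[0]
        return {'$or': [{'a_s': a}, {'b_s': a}, {'c_s': a}]}
    return {}

assert abc_query([3, 2]) == {'$or': [{'a_s': 2, 'b_s': 3}, {'b_s': 2, 'c_s': 3}]}
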
コード例 #38
0
ファイル: main.py プロジェクト: lexmart/lmfdb
def higher_genus_w_automorphisms_search(**args):
    info = to_dict(args)
    bread = get_bread([("Search results", '')])
    C = base.getDBConnection()
    query = {}
    if 'jump_to' in info:
        labs = info['jump_to']
        if label_is_one_passport(labs):
            return render_passport({'passport_label': labs})
        elif label_is_one_family(labs):
            return render_family({'label': labs})
        else:
            flash_error(
                "The label %s is not a legitimate label for this data.", labs)
            return redirect(url_for(".index"))

    #allow for ; in signature
    if info.get('signature'):
        info['signature'] = info['signature'].replace(';', ',')

    try:
        parse_gap_id(info, query, 'group', 'Group')
        parse_ints(info, query, 'genus', name='Genus')
        parse_bracketed_posints(info,
                                query,
                                'signature',
                                split=False,
                                name='Signature',
                                keepbrackets=True)
        if query.get('signature'):
            query['signature'] = info['signature'] = str(
                sort_sign(ast.literal_eval(query['signature']))).replace(
                    ' ', '')
        parse_ints(info, query, 'dim', name='Dimension of the family')
        if 'inc_hyper' in info:
            if info['inc_hyper'] == 'exclude':
                query['hyperelliptic'] = False
            elif info['inc_hyper'] == 'only':
                query['hyperelliptic'] = True
        if 'inc_cyc_trig' in info:
            if info['inc_cyc_trig'] == 'exclude':
                query['cyclic_trigonal'] = False
            elif info['inc_cyc_trig'] == 'only':
                query['cyclic_trigonal'] = True
        if 'inc_full' in info:
            if info['inc_full'] == 'exclude':
                query['full_auto'] = {'$exists': True}
            elif info['inc_full'] == 'only':
                query['full_auto'] = {'$exists': False}

        query['cc.1'] = 1

    except ValueError:
        return search_input_error(info, bread)
    count = parse_count(info)
    start = parse_start(info)

    if 'groupsize' in info and info['groupsize'] != '':
        err, result = add_group_order_range(query, info['groupsize'], C)
        if err is not None:
            flash_error(
                'Parse error on group order field. <font face="Courier New"><br />Given: '
                + err + '<br />-------' + result + '</font>')
    """
    res = C.curve_automorphisms.passports.find(query).sort([(
         'genus', pymongo.ASCENDING), ('dim', pymongo.ASCENDING),
        ('cc'[0],pymongo.ASCENDING)])
    nres = res.count()
    res = res.skip(start).limit(count)

    if(start >= nres):
        start -= (1 + (start - nres) / count) * count
    if(start < 0):
        start = 0


    L = [ ]
    for field in res:
        field['signature'] = ast.literal_eval(field['signature'])    
        L.append(field)

    code = ""
    download_code = 'download' in info
    first_download_entry = True
    for field in L:
        field['signature'] = ast.literal_eval(field['signature'])    
        if download_code:
            if first_download_entry:
                code += '\n'.join(hgcwa_code(label=field['passport_label'], download_type='magma').split('\n')[1:])
            else:
                code += hgcwa_code(label=field['passport_label'], download_type='magma').split('result_record:=[];')[1]
            first_download_entry = False

    
    

    if 'download' in info:
        response = make_response(code)
        response.headers['Content-type'] = 'text/plain'
        return response
    """

    res = C.curve_automorphisms.passports.find(query).sort([
        ('genus', pymongo.ASCENDING), ('dim', pymongo.ASCENDING),
        ('cc'[0], pymongo.ASCENDING)
    ])

    nres = res.count()
    res = res.skip(start).limit(count)

    if (start >= nres):
        start -= (1 + (start - nres) / count) * count
    if (start < 0):
        start = 0

    L = []
    for field in res:
        field['signature'] = ast.literal_eval(field['signature'])
        L.append(field)

    if 'download_magma' in info:
        code = "// MAGMA CODE FOR SEACH RESULTS\n\n"
        first_download_entry = True
        for field in L:
            #print field
            if first_download_entry:
                code += ('\n'.join(
                    hgcwa_code(label=field['passport_label'],
                               download_type='magma').split('\n')[1:]
                )).replace(
                    ", and generate data which is the same for all entries",
                    "")
            else:
                code += hgcwa_code(
                    label=field['passport_label'],
                    download_type='magma').split('result_record:=[];')[1]
            first_download_entry = False
        response = make_response(code)
        response.headers['Content-type'] = 'text/plain'
        return response
    elif 'download_gap' in info:
        code = "# GAP CODE FOR SEARCH RESULTS\n\n"
        first_download_entry = True
        for field in L:
            print(field['group'])
            if first_download_entry:
                code += ('\n'.join(
                    hgcwa_code(label=field['passport_label'],
                               download_type='gap').split('\n')
                    [1:])).replace(
                        "# Generate data which is the same for all entries.\n",
                        "")
            else:
                code += hgcwa_code(
                    label=field['passport_label'],
                    download_type='gap').split('result_record:=[];')[1]
            first_download_entry = False
        response = make_response(code)
        response.headers['Content-type'] = 'text/plain'
        return response

    info['fields'] = L
    info['number'] = nres
    info['group_display'] = sg_pretty
    info['show_downloads'] = len(L) > 0

    info['sign_display'] = sign_display
    info['start'] = start
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (
                start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres

    return render_template(
        "hgcwa-search.html",
        info=info,
        title=
        "Families of Higher Genus Curves with Automorphisms Search Result",
        credit=credit,
        bread=bread)
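
The `download_magma` and `download_gap` branches above assemble one script for the whole result set: the generated code for the first passport is kept in full, and for each later passport only the part after the `result_record:=[];` marker is appended, so the shared preamble appears once. A simplified sketch of that stitching with plain strings standing in for the output of `hgcwa_code`, whose exact format is an assumption here:

def stitch_scripts(scripts, marker='result_record:=[];'):
    """Concatenate per-entry scripts, keeping the shared preamble only once."""
    code = ''
    for i, script in enumerate(scripts):
        if i == 0:
            code += script                   # full code, preamble included
        else:
            code += script.split(marker)[1]  # drop everything up to the marker
    return code

# hypothetical generated scripts for two passports
s1 = 'preamble;\nresult_record:=[];\nentry one;\n'
s2 = 'preamble;\nresult_record:=[];\nentry two;\n'
assert stitch_scripts([s1, s2]) == 'preamble;\nresult_record:=[];\nentry one;\n\nentry two;\n'
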
コード例 #39
0
ファイル: main.py プロジェクト: fredstro/lmfdb
def elliptic_curve_search(**args):
    info = to_dict(args["data"])

    if "download" in info and info["download"] != 0:
        return download_search(info)

    if not "query" in info:
        info["query"] = {}

    bread = [("Elliptic Curves", url_for(".index")), ("Search Results", ".")]
    if "jump" in info:
        label = info.get("label", "").replace(" ", "")
        # This label should be a full isogeny class label or a full
        # curve label (including the field_label component)
        try:
            nf, cond_label, iso_label, number = split_full_label(label.strip())
        except ValueError:
            info["err"] = ""
            return search_input_error(info, bread)

        return show_ecnf(nf, cond_label, iso_label, number)

    query = {}

    try:
        parse_ints(info, query, "conductor_norm")
        parse_noop(info, query, "conductor_label")
        parse_nf_string(info, query, "field", name="base number field", qfield="field_label")
        parse_nf_elt(info, query, "jinv", name="j-invariant")
        parse_ints(info, query, "torsion", name="Torsion order", qfield="torsion_order")
        parse_bracketed_posints(info, query, "torsion_structure", maxlength=2)
        if "torsion_structure" in query and not "torsion_order" in query:
            query["torsion_order"] = reduce(mul, [int(n) for n in query["torsion_structure"]], 1)
    except ValueError:
        return search_input_error(info, bread)

    if "include_isogenous" in info and info["include_isogenous"] == "off":
        info["number"] = 1
        query["number"] = 1

    if "include_base_change" in info and info["include_base_change"] == "off":
        query["base_change"] = []
    else:
        info["include_base_change"] = "on"

    if "include_Q_curves" in info:
        if info["include_Q_curves"] == "exclude":
            query["q_curve"] = False
        elif info["include_Q_curves"] == "only":
            query["q_curve"] = True

    if "include_cm" in info:
        if info["include_cm"] == "exclude":
            query["cm"] = 0
        elif info["include_cm"] == "only":
            query["cm"] = {"$ne": 0}

    info["query"] = query
    count = parse_count(info, 50)
    start = parse_start(info)

    # make the query and trim results according to start/count:

    cursor = db_ecnf().find(query)
    nres = cursor.count()
    if start >= nres:
        start -= (1 + (start - nres) / count) * count
    if start < 0:
        start = 0

    res = (
        cursor.sort(
            [
                ("field_label", ASC),
                ("conductor_norm", ASC),
                ("conductor_label", ASC),
                ("iso_nlabel", ASC),
                ("number", ASC),
            ]
        )
        .skip(start)
        .limit(count)
    )

    res = list(res)
    for e in res:
        e["numb"] = str(e["number"])
        e["field_knowl"] = nf_display_knowl(e["field_label"], getDBConnection(), field_pretty(e["field_label"]))

    info["curves"] = res  # [ECNF(e) for e in res]
    info["number"] = nres
    info["start"] = start
    info["count"] = count
    info["more"] = int(start + count < nres)
    info["field_pretty"] = field_pretty
    info["web_ainvs"] = web_ainvs
    if nres == 1:
        info["report"] = "unique match"
    else:
        if nres > count or start != 0:
            info["report"] = "displaying matches %s-%s of %s" % (start + 1, min(nres, start + count), nres)
        else:
            info["report"] = "displaying all %s matches" % nres
    t = "Elliptic Curve search results"
    return render_template("ecnf-search-results.html", info=info, credit=ecnf_credit, bread=bread, title=t)
コード例 #40
0
def genus2_curve_search(info):
    if 'jump' in info:
        jump = info["jump"].strip()
        if re.match(r'^\d+\.[a-z]+\.\d+\.\d+$', jump):
            return redirect(url_for_curve_label(jump), 301)
        else:
            if re.match(r'^\d+\.[a-z]+$', jump):
                return redirect(url_for_isogeny_class_label(jump), 301)
            else:
                # Handle direct Lhash input
                if re.match(r'^\#\d+$', jump) and ZZ(jump[1:]) < 2**61:
                    c = g2c_db_curves().find_one({'Lhash': jump[1:].strip()})
                    if c:
                        return redirect(
                            url_for_isogeny_class_label(c["class"]), 301)
                    else:
                        errmsg = "hash %s not found"
                else:
                    errmsg = "%s is not a valid genus 2 curve or isogeny class label"
        flash_error(errmsg, jump)
        return redirect(url_for(".index"))

    if info.get('download', '').strip() == '1':
        return download_search(info)

    info["st_group_list"] = st_group_list
    info["st_group_dict"] = st_group_dict
    info["real_geom_end_alg_list"] = real_geom_end_alg_list
    info["real_geom_end_alg_to_ST0_dict"] = real_geom_end_alg_to_ST0_dict
    info["aut_grp_list"] = aut_grp_list
    info["aut_grp_dict"] = aut_grp_dict
    info["geom_aut_grp_list"] = geom_aut_grp_list
    info["geom_aut_grp_dict"] = geom_aut_grp_dict
    bread = info.get('bread',
                     (('Genus 2 Curves', url_for(".index")),
                      ('$\Q$', url_for(".index_Q")), ('Search Results', '.')))

    query = {}
    try:
        parse_ints(info, query, 'abs_disc', 'absolute discriminant')
        parse_bool(info, query, 'is_gl2_type', 'is of GL2-type')
        parse_bool(info, query, 'has_square_sha', 'has square Sha')
        parse_bool(info, query, 'locally_solvable', 'is locally solvable')
        parse_bool(info, query, 'is_simple_geom', 'is geometrically simple')
        parse_ints(info, query, 'cond', 'conductor')
        parse_ints(info, query, 'num_rat_wpts', 'rational Weierstrass points')
        parse_bracketed_posints(info,
                                query,
                                'torsion',
                                'torsion structure',
                                maxlength=4,
                                check_divisibility="increasing")
        parse_ints(info, query, 'torsion_order', 'torsion order')
        if 'torsion' in query and not 'torsion_order' in query:
            query['torsion_order'] = reduce(mul,
                                            [int(n) for n in query['torsion']],
                                            1)
        if 'torsion' in query:
            query['torsion_subgroup'] = str(query['torsion']).replace(" ", "")
            query.pop('torsion')  # search using string key, not array of ints
        parse_ints(info, query, 'two_selmer_rank', '2-Selmer rank')
        parse_ints(info, query, 'analytic_rank', 'analytic rank')
        # G2 invariants and drop-list items don't require parsing -- they are all strings (supplied by us, not the user)
        if 'g20' in info and 'g21' in info and 'g22' in info:
            query['g2_inv'] = "['%s','%s','%s']" % (info['g20'], info['g21'],
                                                    info['g22'])
        if 'class' in info:
            query['class'] = info['class']
        for fld in ('st_group', 'real_geom_end_alg', 'aut_grp_id',
                    'geom_aut_grp_id'):
            if info.get(fld): query[fld] = info[fld]
    except ValueError as err:
        info['err'] = str(err)
        return render_template("g2c_search_results.html",
                               info=info,
                               title='Genus 2 Curves Search Input Error',
                               bread=bread,
                               credit=credit_string)
    # Database query happens here
    info["query"] = query  # save query for reuse in download_search
    cursor = g2c_db_curves().find(
        query, {
            '_id': False,
            'label': True,
            'eqn': True,
            'st_group': True,
            'is_gl2_type': True,
            'is_simple_geom': True,
            'analytic_rank': True
        })

    count = parse_count(info, 50)
    start = parse_start(info)
    nres = cursor.count()
    if (start >= nres):
        start -= (1 + (start - nres) / count) * count
    if (start < 0):
        start = 0

    res = cursor.sort([("cond", ASCENDING), ("class", ASCENDING),
                       ("disc_key", ASCENDING),
                       ("label", ASCENDING)]).skip(start).limit(count)
    nres = res.count()

    if nres == 1:
        info["report"] = "unique match"
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (
                start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    res_clean = []

    for v in res:
        v_clean = {}
        v_clean["label"] = v["label"]
        v_clean["class"] = class_from_curve_label(v["label"])
        v_clean["is_gl2_type"] = v["is_gl2_type"]
        v_clean["is_simple_geom"] = v["is_simple_geom"]
        v_clean["equation_formatted"] = list_to_min_eqn(literal_eval(v["eqn"]))
        v_clean["st_group_link"] = st_link_by_name(1, 4, v['st_group'])
        v_clean["analytic_rank"] = v["analytic_rank"]
        res_clean.append(v_clean)

    info["curves"] = res_clean
    info["curve_url"] = lambda label: url_for_curve_label(label)
    info["class_url"] = lambda label: url_for_isogeny_class_label(label)
    info["start"] = start
    info["count"] = count
    info["more"] = int(start + count < nres)

    title = info.get('title', 'Genus 2 Curve search results')
    credit = credit_string

    return render_template("g2c_search_results.html",
                           info=info,
                           credit=credit,
                           learnmore=learnmore_list(),
                           bread=bread,
                           title=title)
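
Unlike #35 and #36, this version matches the torsion structure and the G2 invariants against canonical string keys, so the query has to reproduce the exact encoding: a bracketed list with no spaces for `torsion_subgroup`, and a quoted triple for `g2_inv`. A short sketch of the two encodings with made-up values:

torsion = [2, 4]
torsion_subgroup_key = str(torsion).replace(" ", "")
assert torsion_subgroup_key == '[2,4]'

g20, g21, g22 = '3200', '-1600', '1600'   # hypothetical invariant values
g2_inv_key = "['%s','%s','%s']" % (g20, g21, g22)
assert g2_inv_key == "['3200','-1600','1600']"
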
コード例 #41
0
def hgm_search(**args):
    info = to_dict(args)
    bread = get_bread([("Search results", '')])
    query = {}
    if 'jump_to' in info:
        return render_hgm_webpage({'label': info['jump_to']})

    family_search = False
    if info.get('Submit Family') or info.get('family'):
        family_search = True

    # generic, irreducible not in DB yet

    try:
        parse_ints(info, query, 'degree')
        parse_ints(info, query, 'weight')
        parse_bracketed_posints(info, query, 'famhodge', 'family Hodge vector',split=False)
        parse_restricted(info, query, 'sign', allowed=['+1',1,-1], process=int)
        for param in ['A', 'B', 'A2', 'B2', 'A3', 'B3', 'A5', 'B5', 'A7', 'B7',
            'Au2', 'Bu2', 'Au3', 'Bu3', 'Au5', 'Bu5', 'Au7', 'Bu7']:
            parse_bracketed_posints(info, query, param, split=False,
                listprocess=lambda a: sorted(a, reverse=True))
        if not family_search:
            parse_ints(info, query, 'conductor')
            parse_rational(info, query, 't')
            parse_bracketed_posints(info, query, 'hodge', 'Hodge vector')
    except ValueError:
        return search_input_error(info, bread)

    #print query
    count_default = 20
    if info.get('count'):
        try:
            count = int(info['count'])
        except:
            count = count_default
    else:
        count = count_default
    info['count'] = count

    start_default = 0
    if info.get('start'):
        try:
            start = int(info['start'])
            if(start < 0):
                start += (1 - (start + 1) / count) * count
        except:
            start = start_default
    else:
        start = start_default
    if info.get('paging'):
        try:
            paging = int(info['paging'])
            if paging == 0:
                start = 0
        except:
            pass

    # logger.debug(query)
    if family_search:
        #query['leader'] = '1'
        res = familydb().find(query).sort([('label', pymongo.ASCENDING)])
    else:
        res = motivedb().find(query).sort([('cond', pymongo.ASCENDING), ('label', pymongo.ASCENDING)])
    nres = res.count()
    res = res.skip(start).limit(count)

    if(start >= nres):
        start -= (1 + (start - nres) / count) * count
    if(start < 0):
        start = 0

    info['motives'] = res
    info['number'] = nres
    info['start'] = start
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    info['make_label'] = make_abt_label
    info['make_t_label'] = make_t_label
    info['ab_label'] = ab_label
    info['display_t'] = display_t
    info['family'] = family_search
    info['factorint'] = factorint

    if family_search:
        return render_template("hgm-search.html", info=info, title="Hypergeometric Family over $\Q$ Search Result", bread=bread, credit=HGM_credit, learnmore=learnmore_list())
    else:
        return render_template("hgm-search.html", info=info, title="Hypergeometric Motive over $\Q$ Search Result", bread=bread, credit=HGM_credit, learnmore=learnmore_list())
コード例 #42
0
ファイル: main.py プロジェクト: haraldschilly/lmfdb
def galois_group_search(**args):
    info = to_dict(args)
    if info.get('jump_to'):
        return redirect(url_for('.by_label', label=info['jump_to']).strip(), 301)
    bread = get_bread([("Search Results", ' ')])
    C = base.getDBConnection()
    query = {}

    def includes_composite(s):
        s = s.replace(' ','').replace('..','-')
        for interval in s.split(','):
            if '-' in interval[1:]:
                ix = interval.index('-',1)
                a, b = ZZ(interval[:ix]), ZZ(interval[ix+1:])
                if b == a:
                    if a != 1 and not a.is_prime():
                        return True
                if b > a and b > 3:
                    return True
            else:
                a = ZZ(interval)
                if a != 1 and not a.is_prime():
                    return True
    try:
        parse_ints(info,query,'n','degree')
        parse_ints(info,query,'t')
        parse_ints(info,query,'order', qfield='orderkey', parse_singleton=make_order_key)
        parse_bracketed_posints(info, query, qfield='gapidfull', split=False, exactlength=2, keepbrackets=True, name='Gap id', field='gapid')
        for param in ('cyc', 'solv', 'prim', 'parity'):
            parse_bool(info,query,param,minus_one_to_zero=(param != 'parity'))
        degree_str = prep_ranges(info.get('n'))
        info['show_subs'] = degree_str is None or (LIST_RE.match(degree_str) and includes_composite(degree_str))
    except ValueError as err:
        info['err'] = str(err)
        return search_input_error(info, bread)

    count = parse_count(info, 50)
    start = parse_start(info)

    if 'orderkey' in query and not ('n' in query):
        res = C.transitivegroups.groups.find(query).sort([('orderkey', pymongo.ASCENDING), ('gapid', pymongo.ASCENDING), ('n', pymongo.ASCENDING), ('t', pymongo.ASCENDING)])
    else:
        res = C.transitivegroups.groups.find(query).sort([('n', pymongo.ASCENDING), ('t', pymongo.ASCENDING)])
    nres = res.count()
    res = res.skip(start).limit(count)

    if(start >= nres):
        start -= (1 + (start - nres) / count) * count
    if(start < 0):
        start = 0

    info['groups'] = res
    info['group_display'] = group_display_prettyC(C)
    info['report'] = "found %s groups" % nres
    info['yesno'] = yesno
    info['wgg'] = WebGaloisGroup.from_data
    info['start'] = start
    info['number'] = nres
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres

    return render_template("gg-search.html", info=info, title="Galois Group Search Result", bread=bread, credit=GG_credit)
コード例 #43
0
def hgm_search(info):
    #info = to_dict(args)
    bread = get_bread([("Search Results", '')])
    query = {}
    queryab = {}
    queryabrev = {}
    if 'jump_to' in info:
        label = clean_input(info['jump_to'])
        if HGM_LABEL_RE.match(label):
            return render_hgm_webpage(normalize_motive(label))
        if HGM_FAMILY_LABEL_RE.match(label):
            return render_hgm_family_webpage(normalize_family(label))
        flash_error(
            '%s is not a valid label for a hypergeometric motive or family of hypergeometric motives',
            label)
        return redirect(url_for(".index"))

    family_search = False
    if info.get('Submit Family') or info.get('family'):
        family_search = True

    # generic, irreducible not in DB yet

    try:
        parse_ints(info, query, 'degree')
        parse_ints(info, query, 'weight')
        parse_bracketed_posints(info,
                                query,
                                'famhodge',
                                'family Hodge vector',
                                split=False)
        parse_restricted(info,
                         query,
                         'sign',
                         allowed=['+1', 1, -1],
                         process=int)
        for param in [
                'A', 'B', 'A2', 'B2', 'A3', 'B3', 'A5', 'B5', 'A7', 'B7',
                'Au2', 'Bu2', 'Au3', 'Bu3', 'Au5', 'Bu5', 'Au7', 'Bu7'
        ]:
            parse_bracketed_posints(
                info,
                queryab,
                param,
                split=False,
                listprocess=lambda a: sorted(a, reverse=True))
        # Make a version to search reversed way
        if not family_search:
            parse_ints(info, query, 'conductor', 'Conductor', 'cond')
            parse_rational(info, query, 't')
            parse_bracketed_posints(info, query, 'hodge', 'Hodge vector')
    except ValueError:
        if family_search:
            return render_template(
                "hgm-search.html",
                info=info,
                title="Hypergeometric Family over $\Q$ Search Result",
                bread=bread,
                credit=HGM_credit,
                learnmore=learnmore_list())
        return render_template(
            "hgm-search.html",
            info=info,
            title="Hypergeometric Motive over $\Q$ Search Result",
            bread=bread,
            credit=HGM_credit,
            learnmore=learnmore_list())

    # Now combine the parts of the query if there are A,B parts
    if queryab != {}:
        for k in queryab.keys():
            queryabrev[k + 'rev'] = queryab[k]
        queryab.update(query)
        queryabrev.update(query)
        query = {'$or': [queryab, queryabrev]}
    print(query)
    count_default = 20
    if info.get('count'):
        try:
            count = int(info['count'])
        except:
            count = count_default
    else:
        count = count_default
    info['count'] = count

    start_default = 0
    if info.get('start'):
        try:
            start = int(info['start'])
            if (start < 0):
                start += (1 - (start + 1) / count) * count
        except:
            start = start_default
    else:
        start = start_default
    if info.get('paging'):
        try:
            paging = int(info['paging'])
            if paging == 0:
                start = 0
        except:
            pass

    # logger.debug(query)
    if family_search:
        #query['leader'] = '1'
        res = familydb().find(query).sort([('label', pymongo.ASCENDING)])
    else:
        res = motivedb().find(query).sort([('cond', pymongo.ASCENDING),
                                           ('label', pymongo.ASCENDING)])
    nres = res.count()
    res = res.skip(start).limit(count)

    if (start >= nres):
        start -= (1 + (start - nres) / count) * count
    if (start < 0):
        start = 0

    info['motives'] = res
    info['number'] = nres
    info['start'] = start
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (
                start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    info['make_label'] = make_abt_label
    info['make_t_label'] = make_t_label
    info['ab_label'] = ab_label
    info['display_t'] = display_t
    info['family'] = family_search
    info['factorint'] = factorint

    if family_search:
        return render_template(
            "hgm-search.html",
            info=info,
            title="Hypergeometric Family over $\Q$ Search Result",
            bread=bread,
            credit=HGM_credit,
            learnmore=learnmore_list())
    else:
        return render_template(
            "hgm-search.html",
            info=info,
            title="Hypergeometric Motive over $\Q$ Search Result",
            bread=bread,
            credit=HGM_credit,
            learnmore=learnmore_list())
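
Compared with #41, this version parses the A/B constraints into a separate `queryab`, builds a mirrored copy with `rev`-suffixed keys, and joins the two with the shared constraints under `$or`, so a motive matches whichever orientation of (A, B) is stored. A sketch of just that merging step with hypothetical keys and values:

query = {'degree': 4}                  # constraints shared by both orientations
queryab = {'A': [4, 2], 'B': [3, 1]}   # hypothetical sorted A/B constraints

queryabrev = {k + 'rev': v for k, v in queryab.items()}
queryab.update(query)
queryabrev.update(query)
combined = {'$or': [queryab, queryabrev]}

assert combined == {'$or': [
    {'A': [4, 2], 'B': [3, 1], 'degree': 4},
    {'Arev': [4, 2], 'Brev': [3, 1], 'degree': 4},
]}
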