Example #1
def start_lagrange_iteration():
    # Get user input
    (A, B) = reader.read_ab()
    m = reader.read_m()

    table = old_logic.create_table(m, A, B, settings.f)

    reversed_table = new_logic.get_reversed_table(table)

    printer.print_table(reversed_table, 'f(x)', 'x')

    while True:
        F = reader.read_F()

        n = reader.read_n(m)

        found_x = old_logic.get_polynom_value_lagrange(
            F, n, reversed_table)

        sys.stdout.write(colors.BOLD)
    print("Solution using the 'reversed' table")
        sys.stdout.write(colors.RESET)

        printer.print_results(found_x, F)

        use_bisection(A, B, m, F, n)

        print("Compute for new x and n? [Y/N]")
        answer = input()

        if answer.upper() == 'N':
            break
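
# old_logic.get_polynom_value_lagrange is external to this snippet. A
# minimal sketch of a Lagrange evaluator in the same spirit, assuming
# the node table is a list of (x, y) pairs (the name and table layout
# are illustrative assumptions, not the original API):
def lagrange_value(x, nodes):
    """Evaluate the Lagrange interpolation polynomial at x."""
    result = 0.0
    for i, (x_i, y_i) in enumerate(nodes):
        # Basis polynomial l_i(x): equals 1 at x_i, 0 at every other node.
        basis = 1.0
        for j, (x_j, _) in enumerate(nodes):
            if j != i:
                basis *= (x - x_j) / (x_i - x_j)
        result += y_i * basis
    return result
# Inverse interpolation, as used above, applies the same formula to the
# 'reversed' table of (f(x), x) pairs to find x for a given F.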
Example #2
def main():
    """Do the main job."""
    from printer import print_results

    # detect input directory with filenames containing dataset titles:
    try:
        inputdir = sys.argv[1]
    except IndexError:
        print('Usage: %s <input>' % sys.argv[0])
        print('<input> can be a text file with one entry per line or a',
              'directory with text files.')
        sys.exit(1)

    # read dataset titles
    inputdatasets = get_datasets_from_dir(inputdir)

    # categorise datasets
    categorised = categorise_titles(inputdatasets)

    # print results
    print_results(categorised)
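
# get_datasets_from_dir is not shown in this snippet. Going by the usage
# message above ("a text file with one entry per line or a directory
# with text files"), a plausible sketch; the real helper's exact
# behaviour is an assumption:
import os


def get_datasets_from_dir(path):
    """Return dataset titles from a file (one per line) or a directory."""
    if os.path.isdir(path):
        titles = []
        for name in sorted(os.listdir(path)):
            with open(os.path.join(path, name)) as f:
                titles.extend(line.strip() for line in f if line.strip())
        return titles
    with open(path) as f:
        return [line.strip() for line in f if line.strip()]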
Example #3
def run_for_owner(accts, classes, assets, targets, no_sell_holdings,
                  allow_gains):
    taxable_accts = [a for a in accts if a.taxable]
    non_taxable_accts = [a for a in accts if not a.taxable]

    current_allocations, new_allocations = analysis.optimize_allocations(
        taxable_accts=taxable_accts,
        non_taxable_accts=non_taxable_accts,
        classes=classes,
        assets=assets,
        targets=targets,
        no_sell_holdings=no_sell_holdings,
        allow_gains=allow_gains)

    printer.print_investables(accts)
    printer.print_results(current_allocations=current_allocations,
                          new_allocations=new_allocations,
                          taxable_accts=taxable_accts,
                          non_taxable_accts=non_taxable_accts,
                          classes=classes,
                          assets=assets,
                          targets=targets)
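
# A usage sketch for run_for_owner. The Account class below is a
# hypothetical stand-in: the only attribute the function reads directly
# is the boolean `taxable` flag; classes, assets, targets and the other
# arguments are passed through to analysis.optimize_allocations and
# printer.print_results unchanged.
from dataclasses import dataclass


@dataclass
class Account:
    name: str
    taxable: bool


accts = [Account('Brokerage', taxable=True),
         Account('401k', taxable=False)]
# run_for_owner(accts, classes, assets, targets, no_sell_holdings,
#               allow_gains)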
Example #4
def use_bisection(A, B, m, F, n):

    table = old_logic.create_table(m, A, B, settings.f)

    sys.stdout.write(colors.BOLD)
    print('\nSolution using the bisection method\n')
    sys.stdout.write(colors.RESET)

    print('Enter epsilon:')
    epsilon = float(input())

    start = table[0][1]
    end = table[-1][1]

    # Bracket the root: either an exact table hit or a sign change
    # of f(x) - F between two neighbouring nodes.
    for i in range(len(table) - 1):
        if table[i][2] == F:
            start = table[i][1]
            end = start
            break
        if ((table[i][2] > F and table[i + 1][2] < F)
                or (table[i][2] < F and table[i + 1][2] > F)):
            start = table[i][1]
            end = table[i + 1][1]
            break

    left_part = new_logic.get_polynom_newton(
        (start + end) / 2, n, table, settings.f, F)

    sections = old_logic.root_separation(left_part, n, A, B)
    xs = []

    for section in sections:
        (_, found_x, _, _) = old_logic.bisection_method(
            left_part, section[0], section[1], epsilon)
        xs.append(found_x)

    printer.print_results(xs[0], F)
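
# old_logic.bisection_method returns a 4-tuple above; a minimal
# single-value sketch of the underlying algorithm (the name, argument
# order and return shape here are assumptions):
def bisect_root(f, a, b, epsilon):
    """Find a root of f on [a, b], assuming f(a) and f(b) differ in sign."""
    fa = f(a)
    while b - a > epsilon:
        mid = (a + b) / 2
        fm = f(mid)
        if fa * fm <= 0:
            b = mid            # the sign change is in the left half
        else:
            a, fa = mid, fm    # the sign change is in the right half
    return (a + b) / 2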
Example #5
def main(dataset_list, create_eos_indexes, eos_dir, ignore_eos_store,
         create_das_json_store, das_dir, create_mcm_store, mcm_dir,
         get_conf_files, conf_dir, print_categorisation, print_results,
         create_records, create_conffile_records, recid_file, doi_file):
    """
    Interface for manipulation of dataset records for OpenData portal.

    DATASET_LIST is a text file with a list of datasets, one per line.

    OUTPUT_DIR is the desired output directory.

    There are several steps to produce the final json files.

    step 1) generate EOS index files for the datasets

        \b
        $ export EOS_MGM_URL=root://eospublic.cern.ch
        $ python ./code/interface.py --create-eos-indexes DATASET_LIST

        This will populate EOS_DIR with a txt and a json file for each
        dataset. The files contain the list of root files of that dataset.

    step 2) get DAS metadata

        \b
        $ voms-proxy-init -voms cms -rfc
        $ python ./code/interface.py --create-das-json-store DATASET_LIST

        This creates a local cache. It queries DAS (Data Aggregation Service)
        for the dataset, parent, config and mcm information and stores it in
        DAS_DIR/{dataset/,parent/,config/,mcm/}.

        \b
        (It can take a long time to run, up to ~30 seconds per dataset.)

    step 3) get McM scripts to run cmsDriver

        \b
        $ python ./code/interface.py --create-mcm-store DATASET_LIST

        This will query McM to get the dict and setup scripts for each dataset.
        It also queries the input_dataset (GEN-SIM).

    step 4) get the config files

        \b
        In the ~/.globus/ dir, follow the instructions from
        https://ca.cern.ch/ca/help/?kbid=024010 and also run:
        $ openssl pkcs12 -in myCert.p12 -nocerts -nodes -out userkey.nodes.pem
        Then run the interface code:

        $ python ./code/interface.py --get-conf-files DATASET_LIST

        This downloads the configuration files to CONF_DIR.

    step 5) generate the records

        \b
        $ python ./code/interface.py --create-records DATASET_LIST
        $ python ./code/interface.py --create-conffiles-records DATASET_LIST

    To get a markdown file with the results of the previous steps:

        $ python ./code/interface.py --print-results DATASET_LIST

        This will use all the information from the local cache to produce a
        list with all the datasets in their categories, with as much
        additional information as is available.

    In case you are interested only in the categorisation, there is no need
    to create the local cache, just run:

        $ python ./code/interface.py --print-categorisation DATASET_LIST > categorisation.md
    """
    datasets = get_datasets_from_dir(dataset_list)

    if create_eos_indexes:
        import eos_store

        if os.environ.get("EOS_MGM_URL") == "root://eospublic.cern.ch":
            eos_store.main(datasets, eos_dir)
        else:
            print("EOS_MGM_URL not set.")
            print(
                'Did you forget to "export EOS_MGM_URL=root://eospublic.cern.ch"?'
            )

    if create_das_json_store:
        # check if user has voms-proxy
        proxyinfo = subprocess.run("voms-proxy-info",
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   shell=True)
        if proxyinfo.returncode != 0:
            print("Error in VOMS proxy.")
            print('Did you forget to "voms-proxy-init -voms cms -rfc"?')
        else:
            import das_json_store
            das_json_store.main(das_dir, eos_dir, datasets, ignore_eos_store)

    if create_mcm_store:
        import mcm_store
        mcm_store.create(datasets, mcm_dir, das_dir, eos_dir, ignore_eos_store)

    if get_conf_files:
        # check if user has key and cert
        home = os.path.expanduser("~")
        if (os.path.isfile(home + "/.globus/usercert.pem") and
                os.path.isfile(home + "/.globus/userkey.nodes.pem")):
            import config_store
            config_store.main(eos_dir, das_dir, conf_dir, datasets,
                              ignore_eos_store)
        else:
            print(
                "Error in key and certificate pairs (~/.globus/usercert.pem, ~/.globus/userkey.nodes.pem)."
            )
            print('Did you forget to run')
            print(
                "\t$ openssl pkcs12 -in myCert.p12 -clcerts -nokeys -out usercert.pem"
            )
            print(
                "\t$ openssl pkcs12 -in myCert.p12 -nocerts -nodes -out userkey.nodes.pem"
            )
            print('in the ~/.globus dir?')

    if print_categorisation or print_results:
        import printer
        import categorisation

        categorised = categorisation.categorise_titles(datasets)
        printer.print_results(categorised, das_dir, mcm_dir, recid_file,
                              doi_file, print_results)

    if create_records:
        import dataset_records
        dataset_records.main(datasets, eos_dir, das_dir, mcm_dir, conf_dir,
                             doi_file, recid_file)

    if create_conffile_records:
        import conffiles_records
        conffiles_records.main(datasets, eos_dir, das_dir, mcm_dir, conf_dir)
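
# The \b markers in the docstring are click's paragraph-preservation
# syntax, and the parameters mirror flag names such as
# --create-eos-indexes, so main() above is presumably a click command.
# A reduced sketch of how it might be wired up; the decorator set shown
# here is an assumption, and the real command would expose one option
# per parameter of main():
import click


@click.command()
@click.argument('dataset_list', type=click.Path(exists=True))
@click.option('--create-eos-indexes', is_flag=True,
              help='Generate EOS index files for the datasets.')
@click.option('--eos-dir', help='Directory for the EOS index files.')
def cli(dataset_list, create_eos_indexes, eos_dir):
    """Reduced option set, for illustration only."""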