Example #1
def read_info(self):
    info_config = configparser.ConfigParser()
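Example #1 stops right after constructing the parser. A minimal sketch of the read-then-get pattern it presumably continues into; the file name, section, and option below are hypothetical:

import configparser


def read_config_value(path='info.ini'):
    # read() silently skips files that do not exist
    info_config = configparser.ConfigParser()
    info_config.read(path)
    # fallback= avoids NoSectionError/NoOptionError for missing keys
    return info_config.get('info', 'name', fallback='')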
Example #2
# Imports needed by this snippet; Overlap is a project-specific helper class.
import argparse
import configparser
import re
from itertools import chain

import pandas as pd
import yaml


def main():
    """
    Parse arguments
    """
    parser = argparse.ArgumentParser(description='Calculate db overlaps')
    parser.add_argument('--config', help='Config file')
    parser.add_argument('--input', help='Input file')
    parser.add_argument('--output', help='Output file')
    parser.add_argument('--columns', help='Columns file')
    parser.add_argument('--verbose',
                        help='increase output verbosity',
                        action='store_true')
    args = parser.parse_args()
    """
    Read config file
    """
    if args.verbose:
        print("Reading config file")
    config = configparser.ConfigParser()
    config.read(args.config)
    """
    Read input file and define variables
    """
    if args.verbose:
        print("Reading input file and defining variables")

    my_dict_data = {}

    # input_file_path = config.get('DEFAULT', 'input')
    input_file_path = args.input
    input_file = open(input_file_path)
    line = input_file.readline()
    header = line.split('\t')

    overlapping = Overlap()
    overlap_threshold = float(config.get('DEFAULT',
                                         'overlap_threshold'))  # 0.7
    bnd_window = float(config.get('DEFAULT', 'bnd_window'))  # 200
    """
    Get list of fields to be extracted
    """
    if args.verbose:
        print("Getting list of fields to be extracted")

    unique_dbs_str = config.get('DEFAULT', 'unique_dbs')
    unique_dbs = [x.strip() for x in unique_dbs_str.split(',')]
    # e.g. ['gnomad', 'ngc38', 'wgs13k', 'ngclov', 'bkl']

    dbs_list = []
    for e in unique_dbs:
        e_list = [col for col in header
                  if e in col
                  and not col.startswith('left')
                  and not col.startswith('right')]
        dbs_list.append(e_list)

    dbs = list(chain.from_iterable(dbs_list))
    # e.g. ['gnomad_s', 'gnomad_e', 'gnomad_ov_id'...]

    fields_list = [['CHROM', 'POS', 'END', 'SVTYPE'], dbs]
    fields = list(chain.from_iterable(fields_list))  # unlist
    """
    Build list of indices, since this may change. You can stick with the column names from above
    """
    if args.verbose:
        print("Building list of indices")

    index = []
    for f in fields:
        index.append(header.index(f))
    """
    Build dict elements
    """
    if args.verbose:
        print("Building dictionary elements")

    dic_element = 0
    for line in input_file:
        information = line.split('\t')

        my_dict_data[dic_element] = {}
        my_dict_data[dic_element]['CHROM'] = information[header.index('CHROM')]
        my_dict_data[dic_element]['POS'] = information[header.index('POS')]
        my_dict_data[dic_element]['END'] = information[header.index('END')]
        my_dict_data[dic_element]['SVTYPE'] = information[header.index(
            'SVTYPE')]
        my_dict_data[dic_element]['dbs'] = {}

        # Let's first initialize all the dictionaries to avoid overwriting them
        for db in unique_dbs:
            if information[header.index(db + '_s')] != "":
                my_dict_data[dic_element]['dbs'][db] = {}
                my_start_split = information[header.index(db + '_s')].split(',')
                id_counter = 0
                for start in my_start_split:
                    my_dict_data[dic_element]['dbs'][db][id_counter] = {}
                    id_counter = id_counter + 1

        for db in dbs:
            if information[header.index(db)] != "":

                # split to get db name
                my_split_db_str = db.split("_")
                db_name = my_split_db_str[0]
                my_db_element = my_split_db_str[1]
                my_split = information[header.index(db)].split(',')

                id_counter = 0

                if my_db_element == 's':
                    for start in my_split:
                        my_dict_data[dic_element]['dbs'][db_name][
                            id_counter].update({'START': start})
                        id_counter = id_counter + 1

                elif my_db_element == 'e':
                    for end in my_split:
                        my_dict_data[dic_element]['dbs'][db_name][
                            id_counter].update({'END': end})
                        id_counter = id_counter + 1

                else:
                    # wgs13k has different ID
                    # db_id = ''
                    # if db_name in ['wgs13k', 'bkl']:
                    # if db_name in ['wgs13k']:
                    #     db_id = my_split_db_str[1]
                    # else:
                    #     db_id = my_split_db_str[2]

                    # my_id_split = information[header.index(db)].split(',')

                    for id_str in my_split:  # avoid shadowing the built-in id()
                        my_values_split = id_str.split('|')

                        if db == 'gnomad_ov_id':
                            db_type = my_values_split[1]
                            SN_value = my_values_split[2]

                        else:
                            db_type = my_values_split[0]
                            SN_value = re.sub('SN=', '', my_values_split[1])

                        my_dict_data[dic_element]['dbs'][db_name][
                            id_counter].update({'SN': SN_value})
                        my_dict_data[dic_element]['dbs'][db_name][
                            id_counter].update({'db_type': db_type})

                        id_counter = id_counter + 1

        dic_element = dic_element + 1
    input_file.close()
    """
    Read dataframe in pandas
    """
    if args.verbose:
        print("Reading data frame in pandas")

    # df_pd = pd.read_csv(input_file_path, delimiter='\t')

    tmplist = []
    # pool = mp.Pool(4)  # TO DO multiprocessing

    for chunk in pd.read_csv(input_file_path, delimiter='\t', chunksize=2000):
        # c = pool.apply_async(process_frame, [chunk])
        tmplist.append(chunk)

    df_pd = pd.concat(tmplist, axis=0)
    del tmplist
    """
    For each db, calculate overlaps and add to pandas dataframe
    """
    if args.verbose:
        print("Calculating overlaps")

    for db in unique_dbs:  # for each db

        db_results = []

        for dic_element in my_dict_data.keys():  # for each row

            dic_element_value = []

            query_start = my_dict_data[dic_element]['POS']
            query_end = my_dict_data[dic_element]['END']
            query_type = my_dict_data[dic_element]['SVTYPE']

            if query_type == 'TRA' or query_type == 'INS':
                # build a +/- bnd_window interval around POS; deriving query_end
                # from the already-shifted query_start collapsed the window to
                # [POS - bnd_window, POS]
                pos = int(query_start)
                query_start = pos - bnd_window
                query_end = pos + bnd_window

            if query_start == query_end:
                query_end = int(query_end) + 1

            # print("Query: " + str(dic_element), query_start, query_end, query_type)

            if db in my_dict_data[dic_element]['dbs'].keys():

                for id_counter in my_dict_data[dic_element]['dbs'][db].keys():
                    db_start = my_dict_data[dic_element]['dbs'][db][
                        id_counter]['START']
                    db_end = my_dict_data[dic_element]['dbs'][db][id_counter][
                        'END']

                    if db == 'bkl':
                        try:
                            ovl = overlapping.overlap(
                                [int(query_start),
                                 int(query_end)],
                                [int(db_start), int(db_end)])
                            db_size = (int(ovl[1]) - int(ovl[0])) + 1
                        except Exception:
                            db_size = 0

                        dic_element_value.append(db_size)

                    else:
                        sn = my_dict_data[dic_element]['dbs'][db][id_counter][
                            'SN']
                        db_type = my_dict_data[dic_element]['dbs'][db][
                            id_counter]['db_type']

                        if db_type == query_type:
                            MRPO = overlapping.reciprocal_percentage_overlap(
                                [int(query_start),
                                 int(query_end)],
                                [int(db_start), int(db_end)])

                            if query_type == 'TRA' or query_type == 'INS':
                                dic_element_value.append(sn)
                            else:
                                if MRPO >= overlap_threshold:
                                    dic_element_value.append(sn)

            else:
                dic_element_value = ['0']

            # print("Query: " + str(dic_element_value), query_start, query_end, query_type)

            # if bkl, sum all values; otherwise return a semicolon-separated string
            if db == 'bkl':
                bkl_lst = list(map(int, dic_element_value))
                db_results.append(sum(bkl_lst))

            else:
                db_results.append(';'.join(dic_element_value))

        db_results_ref = [x if x != '' else '0' for x in db_results]

        df_pd[db] = db_results_ref
    """
    Define columns to export
    """
    if args.verbose:
        print("Defining columns to export")

    columns_path = args.columns

    with open(columns_path, 'r') as colnames:
        cols_list = yaml.safe_load(colnames)
        fixed_cols = cols_list['fixed']
        samples_cols = cols_list['samples']

    cols = list(chain.from_iterable([fixed_cols, samples_cols]))
    """
    Melt data frame
    """
    if args.verbose:
        print("Melting data frame")

    outfile = df_pd[cols]

    outfile_melt = pd.melt(outfile,
                           id_vars=fixed_cols,
                           value_vars=samples_cols,
                           var_name='SAMPLE',
                           value_name='GT')
    """
    Filtering and reformatting
    """
    if args.verbose:
        print("Filtering and reformatting")

    # outfile_alt = outfile_melt.loc[-outfile_melt['GT'].isin(['0/0', './.', './0', '0/.', '0'])]
    # .copy() avoids pandas SettingWithCopyWarning on the column assignments below
    outfile_alt = outfile_melt.loc[
        outfile_melt['GT'] != './.:NaN:0:0,0:--:NaN:NaN:NaN:NAN:NAN:NAN'].copy()

    # add column for sample ID
    outfile_alt['ID'] = outfile_alt.GT.str.split(':').str[7]
    # outfile_alt['ID_sample'] = outfile_alt.GT.str.split(':').str[7]

    # keep only the GT field of the GT column
    outfile_alt['GT'] = outfile_alt.GT.str.split(':').str[0]
    # outfile_alt.replace(to_replace='GT', value=outfile_alt.GT.str.split(':').str[0]) #not working, to fix

    # reorder by input columns file
    cols_ordered = list(
        chain.from_iterable([['SAMPLE', 'GT'],
                             list(fixed_cols)]))
    # cols_ordered = list(chain.from_iterable([['SAMPLE', 'GT', 'ID_SAMPLE'], list(fixed_cols)]))
    outfile_out = outfile_alt[cols_ordered]
    """
    Save to file
    """
    if args.verbose:
        print("Saving to file")

    out_file_path = args.output
    # anchor on the .tab extension so a 'tab' substring elsewhere in the path is untouched
    out_file_melt_path = re.sub(r'\.tab$', '.melt.tab', out_file_path)

    outfile.to_csv(out_file_path, sep='\t', index=False)
    outfile_out.to_csv(out_file_melt_path, sep='\t', index=False)
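The Overlap class used in Example #2 is project-specific. A hedged sketch of what a reciprocal-percentage-overlap check like the one applied above typically computes, with inclusive interval endpoints (the function below is an assumption, not the project's actual implementation):

def reciprocal_percentage_overlap(a, b):
    # overlap length divided by the length of the larger interval,
    # i.e. min(overlap/len_a, overlap/len_b)
    ov_start, ov_end = max(a[0], b[0]), min(a[1], b[1])
    if ov_start > ov_end:
        return 0.0  # disjoint intervals
    overlap_len = ov_end - ov_start + 1
    return overlap_len / max(a[1] - a[0] + 1, b[1] - b[0] + 1)


# with overlap_threshold = 0.7 this hit would be rejected (~0.50):
print(reciprocal_percentage_overlap([100, 200], [150, 250]))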
Example #3
from scipy.interpolate import InterpolatedUnivariateSpline

import numpy as np
import time
import pandas as pd
import plotly.graph_objs as go
from collections import deque
import os
import json
import configparser
import logging
import io
import base64
import string

config = configparser.ConfigParser()
config.read('./config/config.cfg')


def create_logger():
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)

    log_file_handler = logging.FileHandler(config.get('LOG', 'LOG_FILE_PATH'))
    fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'

    formatter = logging.Formatter(fmt)
    log_file_handler.setFormatter(formatter)

    logger.addHandler(log_file_handler)
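A brief usage sketch for Example #3; the [LOG] section and LOG_FILE_PATH key mirror the config.get() call above, while the file contents and message are illustrative:

# ./config/config.cfg is assumed to contain:
#   [LOG]
#   LOG_FILE_PATH = ./app.log
create_logger()
logging.getLogger(__name__).info('logger configured from config.cfg')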
Example #4
# Imports used by this snippet; the underscore-prefixed helpers, KEYS_ORDER
# and LICENSE_TO_CLASSIFIER are defined elsewhere in the same module.
import configparser
import io
import os
from typing import Dict, Tuple

from identify import identify


def format_file(
    filename: str,
    *,
    min_py3_version: Tuple[int, int],
    max_py_version: Tuple[int, int],
) -> bool:
    with open(filename) as f:
        contents = f.read()

    cfg = configparser.ConfigParser()
    cfg.read_string(contents)
    _clean_sections(cfg)

    # normalize names to underscores so sdist / wheel have the same prefix
    cfg['metadata']['name'] = cfg['metadata']['name'].replace('-', '_')

    # if README.md exists, set `long_description` + content type
    readme = _first_file(filename, 'readme')
    if readme is not None:
        long_description = f'file: {os.path.basename(readme)}'
        cfg['metadata']['long_description'] = long_description

        tags = identify.tags_from_filename(readme)
        if 'markdown' in tags:
            cfg['metadata']['long_description_content_type'] = 'text/markdown'
        elif 'rst' in tags:
            cfg['metadata']['long_description_content_type'] = 'text/x-rst'
        else:
            cfg['metadata']['long_description_content_type'] = 'text/plain'

    # set license fields if a license exists
    license_filename = _first_file(filename, 'licen[sc]e')
    if license_filename is not None:
        cfg['metadata']['license_file'] = os.path.basename(license_filename)

        license_id = identify.license_id(license_filename)
        if license_id is not None:
            cfg['metadata']['license'] = license_id

        if license_id in LICENSE_TO_CLASSIFIER:
            cfg['metadata']['classifiers'] = (
                cfg['metadata'].get('classifiers', '').rstrip() +
                f'\n{LICENSE_TO_CLASSIFIER[license_id]}')

    requires = _python_requires(filename, min_py3_version=min_py3_version)
    if requires is not None:
        if not cfg.has_section('options'):
            cfg.add_section('options')
        cfg['options']['python_requires'] = requires

    install_requires = _requires(cfg, 'install_requires')
    if install_requires:
        cfg['options']['install_requires'] = '\n'.join(install_requires)

    setup_requires = _requires(cfg, 'setup_requires')
    if setup_requires:
        cfg['options']['setup_requires'] = '\n'.join(setup_requires)

    if cfg.has_section('options.extras_require'):
        for key in cfg['options.extras_require']:
            group_requires = _requires(cfg, key, 'options.extras_require')
            cfg['options.extras_require'][key] = '\n'.join(group_requires)

    py_classifiers = _py_classifiers(requires, max_py_version=max_py_version)
    if py_classifiers:
        cfg['metadata']['classifiers'] = (
            cfg['metadata'].get('classifiers', '').rstrip() +
            f'\n{py_classifiers}')

    imp_classifiers = _imp_classifiers(filename)
    if imp_classifiers:
        cfg['metadata']['classifiers'] = (
            cfg['metadata'].get('classifiers', '').rstrip() +
            f'\n{imp_classifiers}')

    # sort the classifiers if present
    if 'classifiers' in cfg['metadata']:
        classifiers = sorted(set(cfg['metadata']['classifiers'].split('\n')))
        classifiers = _trim_py_classifiers(
            classifiers,
            requires,
            max_py_version=max_py_version,
        )
        cfg['metadata']['classifiers'] = '\n'.join(classifiers)

    sections: Dict[str, Dict[str, str]] = {}
    for section, key_order in KEYS_ORDER:
        if section not in cfg:
            continue

        entries = {k.replace('-', '_'): v for k, v in cfg[section].items()}

        new_section = {k: entries.pop(k) for k in key_order if k in entries}
        # sort any remaining keys
        new_section.update(sorted(entries.items()))

        sections[section] = new_section
        cfg.pop(section)

    for section in cfg.sections():
        sections[section] = dict(cfg[section])
        cfg.pop(section)

    for k, v in sections.items():
        cfg[k] = v

    sio = io.StringIO()
    cfg.write(sio)
    new_contents = sio.getvalue().strip() + '\n'
    new_contents = new_contents.replace('\t', '    ')
    new_contents = new_contents.replace(' \n', '\n')

    if new_contents != contents:
        with open(filename, 'w') as f:
            f.write(new_contents)

    return new_contents != contents
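The io.StringIO round trip at the end of Example #4 is the standard way to serialize a ConfigParser without touching disk. A self-contained sketch of the same pattern (section and keys illustrative):

import configparser
import io

cfg = configparser.ConfigParser()
cfg['metadata'] = {'name': 'my_pkg', 'version': '1.0'}

sio = io.StringIO()
cfg.write(sio)                 # serialize into the in-memory buffer
print(sio.getvalue().strip())  # [metadata] / name = my_pkg / version = 1.0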
Example #5
# Imports used by this snippet; send_email and toggle_auto_path_finding are
# helpers defined elsewhere in the same script.
import configparser
import sys
import time

import imagehash
import pyautogui
from PIL import Image, ImageGrab


def void_island_grind(window):
    time.sleep(1)
    config = configparser.ConfigParser()
    try:
        config.read('config.ini', encoding='utf-8')
    except configparser.Error:
        # retry with utf-8-sig in case the file starts with a UTF-8 BOM
        config.read('config.ini', encoding='utf-8-sig')
    restart = int(config['Void_Island']['Restart'])
    level = int(config['Void_Island']['Level'])
    melee_kaki_index = config['Void_Island']['Melee_Index'].split(',')
    range_kaki_index = config['Void_Island']['Range_Index'].split(',')

    adv_start_diff = [1619 - 245, 887 - 123]
    pyautogui.click(adv_start_diff[0] + window[0], adv_start_diff[1] + window[1], duration=0.25)

    # check if inventory full
    full_inventory_img_diff = [727 - 245, 351 - 123, 1190 - 245, 397 - 123]
    full_inventory_img = ImageGrab.grab(bbox=(window[0] + full_inventory_img_diff[0],
                                              window[1] + full_inventory_img_diff[1],
                                              window[0] + full_inventory_img_diff[2],
                                              window[1] + full_inventory_img_diff[3]))
    full_inventory_img.save('inventory_check.jpg', 'JPEG')
    im_hash = imagehash.average_hash(Image.open('inventory_check.jpg'))
    im_hash_ref = imagehash.average_hash(Image.open('Ref\\inventory_check_ref.jpg'))
    if abs(im_hash - im_hash_ref) > 4:
        void_island_diff = [328 - 245, 560 - 123]
        pyautogui.click(void_island_diff[0] + window[0], void_island_diff[1] + window[1], duration=0.5)
        level_inc_diff = [1149 - 245, 716 - 123]
        time.sleep(0.8)
        if level > 1:
            pyautogui.click(level_inc_diff[0] + window[0], level_inc_diff[1] + window[1], clicks=level,
                            interval=0.5)
        grind_start_diff = [1103 - 245, 831 - 123]
        pyautogui.click(grind_start_diff[0] + window[0], grind_start_diff[1] + window[1], duration=0.8)
        team_select_diff = [859 - 245, 748 - 123]
        pyautogui.click(team_select_diff[0] + window[0], team_select_diff[1] + window[1], duration=0.8)

        melee_group_diff = [596 - 245, 624 - 123]
        first_kaki_diff = [365 - 245, 766 - 123]
        pyautogui.click(melee_group_diff[0] + window[0], melee_group_diff[1] + window[1], duration=0.8)
        time.sleep(0.2)
        move_counter = 0
        for i in range(len(melee_kaki_index)):  # max 13 kaki, in case of new kaki, need to modify the if statement
            if (int(melee_kaki_index[i]) - 6 * move_counter) > 7:
                pyautogui.moveTo(first_kaki_diff[0] + 150 * 6 + window[0], first_kaki_diff[1] + window[1])
                pyautogui.mouseDown()
                time.sleep(0.5)
                pyautogui.dragRel(xOffset=-150 * 6, yOffset=0, duration=3, mouseDownUp=False)
                time.sleep(0.5)
                pyautogui.mouseUp()
                move_counter += 1
                pyautogui.click(first_kaki_diff[0] +
                                150 * (int(melee_kaki_index[i]) - 6 * move_counter - 1) + window[0],
                                first_kaki_diff[1] + window[1], duration=0.8)
            elif (int(melee_kaki_index[i]) - 6 * move_counter) <= 7:
                pyautogui.click(first_kaki_diff[0] +
                                150 * (int(melee_kaki_index[i]) - 6 * move_counter - 1) + window[0],
                                first_kaki_diff[1] + window[1], duration=0.8)

        range_group_diff = [797 - 245, 622 - 123]
        pyautogui.click(range_group_diff[0] + window[0], range_group_diff[1] + window[1], duration=0.8)
        for i in range(len(range_kaki_index)):  # max 17 kaki, in case of new kaki, need to modify if statement
            if (int(range_kaki_index[i]) - 6 * move_counter) > 13:
                pyautogui.moveTo(first_kaki_diff[0] + 150 * 6 + window[0], first_kaki_diff[1] + window[1])
                pyautogui.mouseDown()
                time.sleep(0.5)
                pyautogui.dragRel(xOffset=-150 * 6, yOffset=0, duration=3, mouseDownUp=False)
                time.sleep(0.5)
                pyautogui.mouseUp()
                pyautogui.moveTo(first_kaki_diff[0] + 150 * 6 + window[0], first_kaki_diff[1] + window[1])
                pyautogui.mouseDown()
                time.sleep(0.5)
                pyautogui.dragRel(xOffset=-150 * 6, yOffset=0, duration=3, mouseDownUp=False)
                time.sleep(0.5)
                pyautogui.mouseUp()
                move_counter += 2
                pyautogui.click(first_kaki_diff[0] +
                                150 * (int(range_kaki_index[i]) - 6 * move_counter) + window[0] - 3,
                                first_kaki_diff[1] + window[1], duration=0.8)
            elif (int(range_kaki_index[i]) - 6 * move_counter) > 7:
                pyautogui.moveTo(first_kaki_diff[0] + 150 * 6 + window[0], first_kaki_diff[1] + window[1])
                pyautogui.mouseDown()
                time.sleep(0.5)
                pyautogui.dragRel(xOffset=-150 * 6, yOffset=0, duration=3, mouseDownUp=False)
                time.sleep(0.5)
                pyautogui.mouseUp()
                move_counter += 1
                pyautogui.click(first_kaki_diff[0] +
                                150 * (int(range_kaki_index[i]) - 6 * move_counter - 1) + window[0],
                                first_kaki_diff[1] + window[1], duration=0.8)
            elif 0 < (int(range_kaki_index[i]) - 6 * move_counter) <= 7:
                pyautogui.click(first_kaki_diff[0] +
                                150 * (int(range_kaki_index[i]) - 6 * move_counter - 1) + window[0],
                                first_kaki_diff[1] + window[1], duration=0.8)
            elif (int(range_kaki_index[i]) - 6 * move_counter) <= 0:
                pyautogui.moveTo(first_kaki_diff[0] + window[0], first_kaki_diff[1] + window[1])
                pyautogui.mouseDown()
                time.sleep(0.5)
                pyautogui.dragRel(xOffset=150 * 6, yOffset=0, duration=3, mouseDownUp=False)
                time.sleep(0.5)
                pyautogui.mouseUp()
                move_counter -= 1
                pyautogui.click(first_kaki_diff[0] +
                                150 * (int(range_kaki_index[i]) - 6 * move_counter - 1) + window[0],
                                first_kaki_diff[1] + window[1], duration=0.8)
        confirm_team_diff = [1493 - 245, 872 - 123]
        pyautogui.click(window[0] + confirm_team_diff[0], window[1] + confirm_team_diff[1], duration=0.5)
        go_button_diff = [1460 - 245, 839 - 123]
        pyautogui.click(window[0] + go_button_diff[0], window[1] + go_button_diff[1], duration=0.5)
        time.sleep(6)
        toggle_auto_path_finding(window)
        time.sleep(6)
        select_blessing_diff = [1047 - 245, 740 - 123]
        pyautogui.click(window[0] + select_blessing_diff[0], window[1] + select_blessing_diff[1], duration=0.5)
        time.sleep(60*restart - 10)
        success_continue_diff = [957 - 245, 874 - 123]
        pyautogui.click(window[0] + success_continue_diff[0], window[1] + success_continue_diff[1], duration=0.5)
        time.sleep(1)  # miracle_stone experience completion
        pyautogui.click(window[0] + success_continue_diff[0], window[1] + success_continue_diff[1], duration=0.5)
        time.sleep(10)  # confirm
        pyautogui.click(window[0] + success_continue_diff[0], window[1] + success_continue_diff[1], duration=0.5)
        time.sleep(10)
    else:
        send_email('Inventory Full!')
        sys.exit()
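A hedged sketch of the config.ini shape Example #5 expects, inferred from the keys read at the top of the function (values illustrative):

import configparser

config = configparser.ConfigParser()
config.read_string("""
[Void_Island]
Restart = 15
Level = 3
Melee_Index = 1,2,3
Range_Index = 4,5
""")
print(int(config['Void_Island']['Restart']))            # 15
print(config['Void_Island']['Melee_Index'].split(','))  # ['1', '2', '3']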
Example #6
# Imports used by this snippet; ALL_SCRIPTS, BASE_SCRIPTS, BOLD, run_tests,
# check_script_list and check_script_prefixes are defined elsewhere in the
# same test_runner.py.
import argparse
import configparser
import datetime
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile


def main():
    # Parse arguments and pass through unrecognised args
    parser = argparse.ArgumentParser(
        add_help=False,
        usage='%(prog)s [test_runner.py options] [script options] [scripts]',
        description=__doc__,
        epilog='''
    Help text and arguments for individual test script:''',
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        '--combinedlogslen',
        '-c',
        type=int,
        default=0,
        help=
        'print a combined log (of length n lines) from all test nodes and test framework to the console on failure.'
    )
    parser.add_argument(
        '--coverage',
        action='store_true',
        help='generate a basic coverage report for the RPC interface')
    parser.add_argument(
        '--exclude',
        '-x',
        help='specify a comma-separated-list of scripts to exclude.')
    parser.add_argument(
        '--extended',
        action='store_true',
        help='run the extended test suite in addition to the basic tests')
    parser.add_argument(
        '--force',
        '-f',
        action='store_true',
        help=
        'run tests even on platforms where they are disabled by default (e.g. windows).'
    )
    parser.add_argument('--help',
                        '-h',
                        '-?',
                        action='store_true',
                        help='print help text and exit')
    parser.add_argument(
        '--jobs',
        '-j',
        type=int,
        default=4,
        help='how many test scripts to run in parallel. Default=4.')
    parser.add_argument(
        '--keepcache',
        '-k',
        action='store_true',
        help=
        'the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.'
    )
    parser.add_argument('--quiet',
                        '-q',
                        action='store_true',
                        help='only print results summary and failure logs')
    parser.add_argument('--tmpdirprefix',
                        '-t',
                        default=tempfile.gettempdir(),
                        help="Root directory for datadirs")
    parser.add_argument('--failfast',
                        action='store_true',
                        help='stop execution after the first test failure')
    args, unknown_args = parser.parse_known_args()

    # args to be passed on always start with two dashes; tests are the remaining unknown args
    tests = [arg for arg in unknown_args if arg[:2] != "--"]
    passon_args = [arg for arg in unknown_args if arg[:2] == "--"]

    # Read config generated by configure.
    config = configparser.ConfigParser()
    configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
    with open(configfile, encoding="utf8") as f:
        config.read_file(f)

    passon_args.append("--configfile=%s" % configfile)

    # Set up logging
    logging_level = logging.INFO if args.quiet else logging.DEBUG
    logging.basicConfig(format='%(message)s', level=logging_level)

    # Create base test directory
    tmpdir = "%s/test_runner_Ł_🏃_%s" % (
        args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
    os.makedirs(tmpdir)

    logging.debug("Temporary test directory at %s" % tmpdir)

    enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")

    if config["environment"]["EXEEXT"] == ".exe" and not args.force:
        # https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
        # https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
        print(
            "Tests currently disabled on Windows by default. Use --force option to enable"
        )
        sys.exit(0)

    if not enable_bitcoind:
        print("No functional tests to run.")
        print("Rerun ./configure with --with-daemon and then make")
        sys.exit(0)

    # Build list of tests
    test_list = []
    if tests:
        # Individual tests have been specified. Run specified tests that exist
        # in the ALL_SCRIPTS list. Accept the name with or without .py extension.
        tests = [re.sub(r"\.py$", "", test) + ".py" for test in tests]
        for test in tests:
            if test in ALL_SCRIPTS:
                test_list.append(test)
            else:
                print("{}WARNING!{} Test '{}' not found in full test list.".
                      format(BOLD[1], BOLD[0], test))
    elif args.extended:
        # Include extended tests
        test_list += ALL_SCRIPTS
    else:
        # Run base tests only
        test_list += BASE_SCRIPTS

    # Remove the test cases that the user has explicitly asked to exclude.
    if args.exclude:
        exclude_tests = [
            re.sub(r"\.py$", "", test) + ".py"
            for test in args.exclude.split(',')
        ]
        for exclude_test in exclude_tests:
            if exclude_test in test_list:
                test_list.remove(exclude_test)
            else:
                print("{}WARNING!{} Test '{}' not found in current test list.".
                      format(BOLD[1], BOLD[0], exclude_test))

    if not test_list:
        print(
            "No valid test scripts specified. Check that your test is in one "
            "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests"
        )
        sys.exit(0)

    if args.help:
        # Print help for test_runner.py, then print help of the first script (with args removed) and exit.
        parser.print_help()
        subprocess.check_call([
            sys.executable,
            os.path.join(config["environment"]["SRCDIR"], 'test', 'functional',
                         test_list[0].split()[0]), '-h'
        ])
        sys.exit(0)

    check_script_list(config["environment"]["SRCDIR"])
    check_script_prefixes()

    if not args.keepcache:
        shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"],
                      ignore_errors=True)

    run_tests(
        test_list,
        config["environment"]["SRCDIR"],
        config["environment"]["BUILDDIR"],
        tmpdir,
        jobs=args.jobs,
        enable_coverage=args.coverage,
        args=passon_args,
        combined_logs_len=args.combinedlogslen,
        failfast=args.failfast,
    )
Example #7
def token():
    config = configparser.ConfigParser()
    config.read("config.ini")
    return config.get("bot", "token")
Example #8

def __init__(self):
    self._conf = configparser.ConfigParser()
    if os.path.exists(cloudstack_file):
        self._conf.read(cloudstack_file)
Example #9
def saveDefaults(self, Dic=DefaultDic):
    config = configparser.ConfigParser()
    config.read_dict(Dic)
    with open('./configuration.ini', 'w') as configfile:
        config.write(configfile)
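read_dict(), as used in Example #9, takes a mapping of section names to option mappings. A hedged sketch of what a DefaultDic like the one referenced might look like (section and key names are hypothetical):

import configparser

DefaultDic = {
    'general': {'theme': 'dark', 'autosave': 'true'},
    'paths': {'output': './out'},
}

config = configparser.ConfigParser()
config.read_dict(DefaultDic)
print(config.get('general', 'theme'))  # dark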
Example #10
import argparse
import configparser
import logging
import sys

import matplotlib.pyplot as plt
import pandas as pd
import psycopg2.errors
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.exc import IntegrityError

from wotstats.api import TIME_FIELDS, Realm, account_info
from wotstats.plotting import create_plot
from wotstats.sql import statistics
from wotstats.utils import flatten, timestamps_to_datetime

config = configparser.ConfigParser(allow_no_value=True)
config.read_dict(
    {
        "db": {"url": "postgresql://wotstats@localhost/wotstats"},
        "api": {"realm": "EU", "application-id": ""},
        "accounts": {},
        "plots": {},
        "logging": {"log-level": "INFO"},
    }
)

parser = argparse.ArgumentParser()
parser.add_argument("--config", default="/etc/wotstats/wotstats.ini")


def main(args=None):
    ...  # snippet truncated here
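Example #10 seeds hard-coded defaults with read_dict() before the file named by --config is loaded, so file values override the defaults. A minimal sketch of that layering:

import configparser

cfg = configparser.ConfigParser(allow_no_value=True)
cfg.read_dict({'api': {'realm': 'EU'}})  # hard-coded defaults
cfg.read('/etc/wotstats/wotstats.ini')   # missing files are skipped silently
print(cfg['api']['realm'])               # 'EU' unless the file overrides it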
Example #11
# Imports used by this snippet; Upgrader and verboseprint are defined
# elsewhere in gem5's cpt_upgrader.py.
import configparser
import os.path as osp


def process_file(path, **kwargs):
    if not osp.isfile(path):
        import errno
        raise IOError(errno.ENOENT, "No such file", path)

    verboseprint("Processing file %s...." % path)

    if kwargs.get('backup', True):
        import shutil
        shutil.copyfile(path, path + '.bak')

    cpt = configparser.ConfigParser()

    # gem5 is case sensitive with parameters
    cpt.optionxform = str

    # Read the current data
    with open(path, 'r') as cpt_file:
        cpt.read_file(cpt_file)

    change = False

    # Make sure we know what we're starting from
    if cpt.has_option('root','cpt_ver'):
        cpt_ver = cpt.getint('root','cpt_ver')

        # Legacy linear checkpoint version
        # convert to list of tags before proceeding
        tags = set([])
        for i in range(2, cpt_ver+1):
            tags.add(Upgrader.legacy[i].tag)
        verboseprint("performed legacy version -> tags conversion")
        change = True

        cpt.remove_option('root', 'cpt_ver')
    # @todo The 'Globals' option is deprecated, and should be removed in the
    # future
    elif cpt.has_option('Globals','version_tags'):
        tags = set((''.join(cpt.get('Globals','version_tags'))).split())
    elif cpt.has_option('root.globals','version_tags'):
        tags = set((''.join(cpt.get('root.globals','version_tags'))).split())
    else:
        print("fatal: no version information in checkpoint")
        exit(1)

    verboseprint("has tags", ' '.join(tags))
    # If the current checkpoint has a tag we don't know about, we have
    # a divergence that (in general) must be addressed by (e.g.) merging
    # simulator support for its changes.
    unknown_tags = tags - (Upgrader.tag_set | Upgrader.untag_set)
    if unknown_tags:
        print("warning: upgrade script does not recognize the following "
              "tags in this checkpoint:", ' '.join(unknown_tags))

    # Apply migrations for tags not in checkpoint and tags present for which
    # downgraders are present, respecting dependences
    to_apply = (Upgrader.tag_set - tags) | (Upgrader.untag_set & tags)
    while to_apply:
        ready = set([ t for t in to_apply if Upgrader.get(t).ready(tags) ])
        if not ready:
            print("could not apply these upgrades:", ' '.join(to_apply))
            print("update dependences impossible to resolve; aborting")
            exit(1)

        for tag in ready:
            Upgrader.get(tag).update(cpt, tags)
            change = True

        to_apply -= ready

    if not change:
        verboseprint("...nothing to do")
        return

    cpt.set('root.globals', 'version_tags', ' '.join(tags))

    # Write the upgraded data back
    verboseprint("...completed")
    with open(path, 'w') as cpt_file:
        cpt.write(cpt_file)
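Setting cpt.optionxform = str in Example #11 disables configparser's default lower-casing of option names. A minimal sketch of the difference:

import configparser

default = configparser.ConfigParser()
default.read_string('[root]\nCptVer = 2')
print(list(default['root']))     # ['cptver']  (option names lower-cased)

preserving = configparser.ConfigParser()
preserving.optionxform = str     # keep option names case sensitive
preserving.read_string('[root]\nCptVer = 2')
print(list(preserving['root']))  # ['CptVer']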
Example #12
import configparser
import logging
import os
from threading import local

from .cursors import *
from .format import FormatterMixin
from .query import QueryMixin
from .schema import SchemaMixin
from .sync import SyncMixin

logger = logging.getLogger(__name__)

_ROOT_ID_SQL = 'SELECT id FROM nodes WHERE name IS NULL AND type == "folder" ORDER BY created'


_SETTINGS_FILENAME = 'cache.ini'

_def_conf = configparser.ConfigParser()
_def_conf['sqlite'] = dict(filename='nodes.db', busy_timeout=30000, journal_mode='wal')
_def_conf['blacklist'] = dict(folders=[])  # read_dict() stringifies this to '[]'


def _get_conf(path='') -> configparser.ConfigParser:
    conf = configparser.ConfigParser()
    conf.read_dict(_def_conf)

    conffn = os.path.join(path, _SETTINGS_FILENAME)
    try:
        with open(conffn) as cf:
            conf.read_file(cf)
    except OSError:
        pass
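In Example #12, read_dict() pre-loads the defaults and the OSError handler leaves them in place when cache.ini is absent, so _get_conf() always returns a usable parser. A usage sketch (the path is hypothetical):

conf = _get_conf('/nonexistent/dir')  # no cache.ini there
print(conf['sqlite']['filename'])     # nodes.db -- the default survives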
Example #13
def getConfig(self):
    self.cf = configparser.ConfigParser()
    self.cf.read(self.configLocation)
    return self.cf
Example #14
# Imports used by this snippet (YAD2K-style converter); unique_config_sections
# is a helper defined elsewhere in the same script.
import configparser
import os

import numpy as np
from keras import backend as K
from keras.layers import (Activation, BatchNormalization, Conv2D,
                          GlobalAveragePooling2D, Input, LeakyReLU,
                          MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D,
                          add, concatenate)
from keras.models import Model
from keras.regularizers import l2
from keras.utils.vis_utils import plot_model as plot


def _main(args):
    config_path = os.path.expanduser(args.config_path)
    weights_path = os.path.expanduser(args.weights_path)
    assert config_path.endswith('.cfg'), '{} is not a .cfg file'.format(
        config_path)
    assert weights_path.endswith(
        '.weights'), '{} is not a .weights file'.format(weights_path)

    output_path = os.path.expanduser(args.output_path)
    assert output_path.endswith(
        '.h5'), 'output path {} is not a .h5 file'.format(output_path)
    output_root = os.path.splitext(output_path)[0]

    # Load weights and config.
    print('Loading weights.')
    weights_file = open(weights_path, 'rb')
    weights_header = np.ndarray(
        shape=(5, ), dtype='int32', buffer=weights_file.read(20))
    print('Weights Header: ', weights_header)
    # TODO: Check transpose flag when implementing fully connected layers.
    # transpose = (weights_header[0] > 1000) or (weights_header[1] > 1000)

    print('Parsing Darknet config.')
    unique_config_file = unique_config_sections(config_path)
    cfg_parser = configparser.ConfigParser()
    cfg_parser.read_file(unique_config_file)

    print('Creating Keras model.')
    if args.fully_convolutional:
        image_height, image_width = None, None
    else:
        image_height = int(cfg_parser['net_0']['height'])
        image_width = int(cfg_parser['net_0']['width'])

    prev_layer = Input(shape=(image_height, image_width, 3))
    all_layers = [prev_layer]
    outputs = []

    weight_decay = (float(cfg_parser['net_0']['decay'])
                    if 'net_0' in cfg_parser.sections() else 5e-4)
    count = 0

    for section in cfg_parser.sections():
        print('Parsing section {}'.format(section))
        if section.startswith('convolutional'):
            filters = int(cfg_parser[section]['filters'])
            size = int(cfg_parser[section]['size'])
            stride = int(cfg_parser[section]['stride'])
            pad = int(cfg_parser[section]['pad'])
            activation = cfg_parser[section]['activation']
            batch_normalize = 'batch_normalize' in cfg_parser[section]

            # Setting weights.
            # Darknet serializes convolutional weights as:
            # [bias/beta, [gamma, mean, variance], conv_weights]
            prev_layer_shape = K.int_shape(prev_layer)

            # TODO: This assumes channel last dim_ordering.
            weights_shape = (size, size, prev_layer_shape[-1], filters)
            darknet_w_shape = (filters, weights_shape[2], size, size)
            weights_size = np.prod(weights_shape)  # np.product is a deprecated alias

            print('conv2d', 'bn' if batch_normalize else '  ', activation,
                  weights_shape)

            conv_bias = np.ndarray(
                shape=(filters, ),
                dtype='float32',
                buffer=weights_file.read(filters * 4))
            count += filters

            if batch_normalize:
                bn_weights = np.ndarray(
                    shape=(3, filters),
                    dtype='float32',
                    buffer=weights_file.read(filters * 12))
                count += 3 * filters

                # TODO: Keras BatchNormalization mistakenly refers to var
                # as std.
                bn_weight_list = [
                    bn_weights[0],  # scale gamma
                    conv_bias,  # shift beta
                    bn_weights[1],  # running mean
                    bn_weights[2]  # running var
                ]

            conv_weights = np.ndarray(
                shape=darknet_w_shape,
                dtype='float32',
                buffer=weights_file.read(weights_size * 4))
            count += weights_size

            # DarkNet conv_weights are serialized Caffe-style:
            # (out_dim, in_dim, height, width)
            # We would like to set these to Tensorflow order:
            # (height, width, in_dim, out_dim)
            # TODO: Add check for Theano dim ordering.
            conv_weights = np.transpose(conv_weights, [2, 3, 1, 0])
            conv_weights = [conv_weights] if batch_normalize else [
                conv_weights, conv_bias
            ]

            # Handle activation.
            act_fn = None
            if activation == 'leaky':
                pass  # Add advanced activation later.
            elif activation != 'linear':
                raise ValueError(
                    'Unknown activation function `{}` in section {}'.format(
                        activation, section))

            padding = 'same' if pad == 1 and stride == 1 else 'valid'
            # Adjust padding model for darknet.
            if stride == 2:
                prev_layer = ZeroPadding2D(((1, 0), (1, 0)))(prev_layer)

            # Create Conv2D layer
            conv_layer = (Conv2D(
                filters, (size, size),
                strides=(stride, stride),
                kernel_regularizer=l2(weight_decay),
                use_bias=not batch_normalize,
                weights=conv_weights,
                activation=act_fn,
                padding=padding))(prev_layer)

            if batch_normalize:
                conv_layer = (BatchNormalization(
                    weights=bn_weight_list))(conv_layer)

            prev_layer = conv_layer

            if activation == 'linear':
                all_layers.append(prev_layer)
            elif activation == 'leaky':
                act_layer = LeakyReLU(alpha=0.1)(prev_layer)
                prev_layer = act_layer
                all_layers.append(prev_layer)

        elif section.startswith('maxpool'):
            size = int(cfg_parser[section]['size'])
            stride = int(cfg_parser[section]['stride'])
            all_layers.append(
                MaxPooling2D(
                    padding='same',
                    pool_size=(size, size),
                    strides=(stride, stride))(prev_layer))
            prev_layer = all_layers[-1]

        elif section.startswith('avgpool'):
            if cfg_parser.items(section) != []:
                raise ValueError('{} with params unsupported.'.format(section))
            all_layers.append(GlobalAveragePooling2D()(prev_layer))
            prev_layer = all_layers[-1]

        elif section.startswith('route'):
            ids = [int(i) for i in cfg_parser[section]['layers'].split(',')]
            if len(ids) == 2:
                for i, item in enumerate(ids):
                    if item != -1:
                        ids[i] = item + 1

            layers = [all_layers[i] for i in ids]

            if len(layers) > 1:
                print('Concatenating route layers:', layers)
                concatenate_layer = concatenate(layers)
                all_layers.append(concatenate_layer)
                prev_layer = concatenate_layer
            else:
                skip_layer = layers[0]  # only one layer to route
                all_layers.append(skip_layer)
                prev_layer = skip_layer

        elif section.startswith('shortcut'):
            ids = [int(i) for i in cfg_parser[section]['from'].split(',')][0]
            activation = cfg_parser[section]['activation']
            shortcut = add([all_layers[ids], prev_layer])
            if activation == 'linear':
                shortcut = Activation('linear')(shortcut)
            all_layers.append(shortcut)
            prev_layer = all_layers[-1]

        elif section.startswith('upsample'):
            stride = int(cfg_parser[section]['stride'])
            all_layers.append(
                UpSampling2D(
                    size=(stride, stride))(prev_layer))
            prev_layer = all_layers[-1]

        elif section.startswith('yolo'):
            classes = int(cfg_parser[section]['classes'])
            # num = int(cfg_parser[section]['num'])
            # mask = int(cfg_parser[section]['mask'])
            n1, n2 = int(prev_layer.shape[1]), int(prev_layer.shape[2])
            n3 = 3
            n4 = (4 + 1 + classes)
            yolo = Reshape((n1, n2, n3, n4))(prev_layer)
            all_layers.append(yolo)
            prev_layer = all_layers[-1]
            outputs.append(len(all_layers) - 1)

        elif (section.startswith('net')):
            pass  # Configs not currently handled during model definition.
        else:
            raise ValueError(
                'Unsupported section header type: {}'.format(section))

    # Create and save model.
    model = Model(inputs=all_layers[0],
                  outputs=[all_layers[i] for i in outputs])
    print(model.summary())
    model.save('{}'.format(output_path))
    print('Saved Keras model to {}'.format(output_path))
    # Check to see if all weights have been read.
    remaining_weights = len(weights_file.read()) / 4
    weights_file.close()
    print('Read {} of {} from Darknet weights.'.format(count, count +
                                                       remaining_weights))
    if remaining_weights > 0:
        print('Warning: {} unused weights'.format(remaining_weights))

    plot(model, to_file='{}.png'.format(output_root), show_shapes=True)
    print('Saved model plot to {}.png'.format(output_root))
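unique_config_sections() in Example #14 exists because Darknet .cfg files repeat section names such as [convolutional], which configparser rejects as duplicates. A hedged sketch of such a helper, consistent with the net_0 keys read above (the actual implementation may differ in detail):

import io
from collections import defaultdict


def unique_config_sections(config_file):
    # rename duplicate sections: [net] -> [net_0], [convolutional] -> [convolutional_0], ...
    section_counters = defaultdict(int)
    output = io.StringIO()
    with open(config_file) as fin:
        for line in fin:
            if line.startswith('['):
                section = line.strip().strip('[]')
                line = '[{}_{}]\n'.format(section, section_counters[section])
                section_counters[section] += 1
            output.write(line)
    output.seek(0)
    return output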
Example #15
def get_config(path: str) -> configparser.ConfigParser:
    config_obj = configparser.ConfigParser(
        interpolation=configparser.ExtendedInterpolation())
    with open(path) as f:
        config_obj.read_file(f)
    return config_obj
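ExtendedInterpolation, enabled in Example #15, allows ${section:option} cross-references inside values (plain ${option} within the same section). A minimal sketch:

import configparser

cfg = configparser.ConfigParser(
    interpolation=configparser.ExtendedInterpolation())
cfg.read_string('''
[paths]
root = /data
logs = ${root}/logs

[app]
log_file = ${paths:logs}/app.log
''')
print(cfg['app']['log_file'])  # /data/logs/app.log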
Example #16
    def create_config(self,
                      rl_time_steps=3000000,
                      garden_time_steps=40,
                      garden_x=10,
                      garden_y=10,
                      sector_width=2,
                      sector_height=2,
                      num_plant_types=2,
                      num_plants_per_type=1,
                      step=1,
                      action_low=0.0,
                      action_high=sim_globals.MAX_WATER_LEVEL,
                      obs_low=0,
                      obs_high=1000,
                      ent_coef=0.01,
                      n_steps=40000,
                      nminibatches=4,
                      noptepochs=4,
                      learning_rate=1e-8,
                      cc_coef=0,
                      water_coef=0,
                      cnn_args=None,
                      dir_path=""):
        config = configparser.ConfigParser()
        config.add_section('rl')
        config['rl']['time_steps'] = str(rl_time_steps)
        config['rl']['ent_coef'] = str(ent_coef)
        config['rl']['n_steps'] = str(n_steps)
        config['rl']['nminibatches'] = str(nminibatches)
        config['rl']['noptepochs'] = str(noptepochs)
        config['rl']['learning_rate'] = str(learning_rate)
        if cnn_args:
            config.add_section('cnn')
            config['cnn']['output_x'] = str(cnn_args["OUTPUT_X"])
            config['cnn']['output_y'] = str(cnn_args["OUTPUT_Y"])
            config['cnn']['num_hidden_layers'] = str(
                cnn_args["NUM_HIDDEN_LAYERS"])
            config['cnn']['num_filters'] = str(cnn_args["NUM_FILTERS"])
            config['cnn']['num_convs'] = str(cnn_args["NUM_CONVS"])
            config['cnn']['filter_size'] = str(cnn_args["FILTER_SIZE"])
            config['cnn']['stride'] = str(cnn_args["STRIDE"])
        config.add_section('garden')
        config['garden']['time_steps'] = str(garden_time_steps)
        config['garden']['X'] = str(garden_x)
        config['garden']['Y'] = str(garden_y)
        config['garden']['sector_width'] = str(sector_width)
        config['garden']['sector_height'] = str(sector_height)
        config['garden']['num_plant_types'] = str(num_plant_types)
        config['garden']['num_plants_per_type'] = str(num_plants_per_type)
        config['garden']['step'] = str(step)
        config.add_section('action')
        config['action']['low'] = str(action_low)
        config['action']['high'] = str(action_high)
        config.add_section('obs')
        config['obs']['low'] = str(obs_low)
        config['obs']['high'] = str(obs_high)
        config.add_section('reward')
        config['reward']['cc_coef'] = str(cc_coef)
        config['reward']['water_coef'] = str(water_coef)

        pathlib.Path('gym_config').mkdir(parents=True, exist_ok=True)
        with open('gym_config/config.ini', 'w') as configfile:
            config.write(configfile)
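Every value in Example #16 is stringified before being stored because configparser only holds strings; reading the generated file back goes through the typed getters. A brief sketch using the defaults above:

import configparser

cfg = configparser.ConfigParser()
cfg.read('gym_config/config.ini')
print(cfg.getint('garden', 'X'))            # 10
print(cfg.getfloat('rl', 'learning_rate'))  # 1e-08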
Example #17
def open_config_file(configfile_path):
    parser = configparser.ConfigParser()
    parser.optionxform = str
    parser.read([configfile_path])
    return parser
Example #18
    def main(self):
        """Main function. This should not be overridden by the subclass test scripts."""

        parser = argparse.ArgumentParser(usage="%(prog)s [options]")
        parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true",
                            help="Leave worldcoinds and test.* datadir on exit or error")
        parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true",
                            help="Don't stop worldcoinds after the test execution")
        parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
                            help="Directory for caching pregenerated datadirs (default: %(default)s)")
        parser.add_argument("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
        parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO",
                            help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
        parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true",
                            help="Print out all RPC calls as they are made")
        parser.add_argument("--portseed", dest="port_seed", default=os.getpid(), type=int,
                            help="The seed to use for assigning port numbers (default: current process id)")
        parser.add_argument("--coveragedir", dest="coveragedir",
                            help="Write tested RPC commands into this directory")
        parser.add_argument("--configfile", dest="configfile",
                            default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../config.ini"),
                            help="Location of the test framework config file (default: %(default)s)")
        parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
                            help="Attach a python debugger if test fails")
        parser.add_argument("--usecli", dest="usecli", default=False, action="store_true",
                            help="use worldcoin-cli instead of RPC for all commands")
        parser.add_argument("--perf", dest="perf", default=False, action="store_true",
                            help="profile running nodes with perf for the duration of the test")
        self.add_options(parser)
        self.options = parser.parse_args()

        PortSeed.n = self.options.port_seed

        check_json_precision()

        self.options.cachedir = os.path.abspath(self.options.cachedir)

        config = configparser.ConfigParser()
        with open(self.options.configfile) as f:
            config.read_file(f)
        self.config = config
        self.options.bitcoind = os.getenv("LITECOIND", default=config["environment"]["BUILDDIR"] + '/src/worldcoind' + config["environment"]["EXEEXT"])
        self.options.bitcoincli = os.getenv("LITECOINCLI", default=config["environment"]["BUILDDIR"] + '/src/worldcoin-cli' + config["environment"]["EXEEXT"])

        os.environ['PATH'] = os.pathsep.join([
            os.path.join(config['environment']['BUILDDIR'], 'src'),
            os.path.join(config['environment']['BUILDDIR'], 'src', 'qt'),
            os.environ['PATH']
        ])

        # Set up temp directory and start logging
        if self.options.tmpdir:
            self.options.tmpdir = os.path.abspath(self.options.tmpdir)
            os.makedirs(self.options.tmpdir, exist_ok=False)
        else:
            self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX)
        self._start_logging()

        self.log.debug('Setting up network thread')
        self.network_thread = NetworkThread()
        self.network_thread.start()

        success = TestStatus.FAILED

        try:
            if self.options.usecli:
                if not self.supports_cli:
                    raise SkipTest("--usecli specified but test does not support using CLI")
                self.skip_if_no_cli()
            self.skip_test_if_missing_module()
            self.setup_chain()
            self.setup_network()
            self.run_test()
            success = TestStatus.PASSED
        except JSONRPCException:
            self.log.exception("JSONRPC error")
        except SkipTest as e:
            self.log.warning("Test Skipped: %s" % e.message)
            success = TestStatus.SKIPPED
        except AssertionError:
            self.log.exception("Assertion failed")
        except KeyError:
            self.log.exception("Key error")
        except Exception:
            self.log.exception("Unexpected exception caught during testing")
        except KeyboardInterrupt:
            self.log.warning("Exiting after keyboard interrupt")

        if success == TestStatus.FAILED and self.options.pdbonfailure:
            print("Testcase failed. Attaching python debugger. Enter ? for help")
            pdb.set_trace()

        self.log.debug('Closing down network thread')
        self.network_thread.close()
        if not self.options.noshutdown:
            self.log.info("Stopping nodes")
            if self.nodes:
                self.stop_nodes()
        else:
            for node in self.nodes:
                node.cleanup_on_exit = False
            self.log.info("Note: worldcoinds were not stopped and may still be running")

        should_clean_up = (
            not self.options.nocleanup and
            not self.options.noshutdown and
            success != TestStatus.FAILED and
            not self.options.perf
        )
        if should_clean_up:
            self.log.info("Cleaning up {} on exit".format(self.options.tmpdir))
            cleanup_tree_on_exit = True
        elif self.options.perf:
            self.log.warning("Not cleaning up dir {} due to perf data".format(self.options.tmpdir))
            cleanup_tree_on_exit = False
        else:
            self.log.warning("Not cleaning up dir {}".format(self.options.tmpdir))
            cleanup_tree_on_exit = False

        if success == TestStatus.PASSED:
            self.log.info("Tests successful")
            exit_code = TEST_EXIT_PASSED
        elif success == TestStatus.SKIPPED:
            self.log.info("Test skipped")
            exit_code = TEST_EXIT_SKIPPED
        else:
            self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
            self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
            exit_code = TEST_EXIT_FAILED
        logging.shutdown()
        if cleanup_tree_on_exit:
            shutil.rmtree(self.options.tmpdir)
        sys.exit(exit_code)
Example #19
_config_file_name = 'kiskadee.conf'
_sys_config_file = os.path.join('/etc', _config_file_name)
_dev_config_file = os.path.join(os.path.dirname(_my_path),  # go up a dir
                                'util', _config_file_name)
_doc_config_file = os.path.join(os.path.dirname(_my_path),
                                'util', _config_file_name)
_defaults = {}
if not os.path.exists(_sys_config_file):
    # log _sys_config_file not found
    # raise ValueError("No such file or directory: %s" % _sys_config_file)
    pass
if not os.path.exists(_dev_config_file):
    # log _dev_config_file not found
    pass

config = configparser.ConfigParser(defaults=_defaults)

_read = config.read([_dev_config_file, _sys_config_file, _doc_config_file])
if len(_read) < 1:
    # log: no config files were loaded
    raise ValueError("Invalid config files. Should be either %s or %s or %s" %
                     (_sys_config_file, _dev_config_file, _doc_config_file))
elif len(_read) == 2 or _read[0] == _sys_config_file:
    # log _sys_config_file found loaded
    pass
else:
    # log _read[0] loaded
    pass

log_file = config['DEFAULT']['log_file']
Example #20
    def is_cli_compiled(self):
        """Checks whether bitcoin-cli was compiled."""
        config = configparser.ConfigParser()
        config.read_file(open(self.options.configfile))

        return config["components"].getboolean("ENABLE_CLI")
Example #21
    def __init__(self, app, **kwargs):
        ttk.Frame.__init__(self, app, **kwargs)

        # ----------Globals Variables---------- #

        self.map_lst = {}
        self.addon_lst = {}
        self.maps_namelst = []
        self.addon_namelst = []
        self.c_map = []
        self.map_selected = []
        self.filever = "0.0.6" # actual version of EpicWorkshop
        self.ew_cfg = configparser.ConfigParser()
        self.updaterVal = True

        # ------------------------------------ #

        app.title("Epic Workshop " + self.filever)
        app.geometry("475x420")
        app.minsize(475, 400)

        #-----------------------------------------------------------------------------------------------------------#

        if getattr(sys, 'frozen', False):  # if the app is compiled
            app.iconbitmap(resource_path("epicworkshop.ico"))
        elif __file__:  # if the app is executed with python
            app.iconbitmap(sys.path[0] + "/img/epicworkshop.ico")

        #-----------------------------------------------------------------------------------------------------------#
        
        app.config(bg="#272727")

        app.protocol("WM_DELETE_WINDOW", self.on_closing)

        self.frame = Frame(app, bg="#272727", highlightthickness=0)
        self.frame.pack(fill=BOTH, anchor=CENTER, expand=YES)

        self.tmp_can = Canvas(self.frame, bg="#272727", height=40, width=200, highlightthickness=0)
        self.tmp_can.place(anchor=CENTER, relx=0.5, rely=0.6)

        self.cfg_label = Label(self.frame, text="Hi !", bg="#272727", font=("Consolas", 12), fg="white", highlightthickness=0)
        self.cfg_label.place(anchor=CENTER, relx=0.5, rely=0.5)
        
        # check whether the version stored in the "version" file matches the app version
        if os.path.exists(appdata("version")):
            with open(appdata("version"), "r") as data:
                ver = data.read()
            if ver != self.filever:
                patch = requests.get("https://api.github.com/repos/Naaikho/epicworkshop-compiled/releases/latest")
                patch = patch.json()["body"]
                showinfo("Patch v" + self.filever, patch)
                with open(appdata("version"), "w") as data:
                    data.write(self.filever)
        else: # or just create the "version" file
            with open(appdata("version"), "w") as data:
                data.write(self.filever)

        # start the init thread to show the live info
        self.initthread = threading.Thread(target=self.init)
        self.initthread.daemon = True
        self.initthread.start()
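A caution on the init thread above: Tkinter widgets are not thread-safe, so a common pattern is to have the worker push results onto a queue that the Tk main loop polls via after(). A minimal sketch of that pattern (names are illustrative):

import queue

# In __init__: create the queue and kick off polling on the Tk main loop:
#   self.msg_queue = queue.Queue()
#   self.poll_queue()

def poll_queue(self):
    try:
        text = self.msg_queue.get_nowait()   # posted by the worker thread
        self.cfg_label.config(text=text)     # safe: runs on the main loop
    except queue.Empty:
        pass
    self.cfg_label.after(100, self.poll_queue)  # re-poll every 100 ms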
Example #22
    def is_zmq_compiled(self):
        """Checks whether the zmq module was compiled."""
        config = configparser.ConfigParser()
        # Read the build config; a context manager closes the handle promptly.
        with open(self.options.configfile) as f:
            config.read_file(f)

        return config["components"].getboolean("ENABLE_ZMQ")
Example #23
def load_configfile(filename):
    # The 'list' converter adds a getlist() accessor that splits
    # comma-separated values and strips whitespace around each item.
    config_file_parser = configparser.ConfigParser(converters={'list': lambda x: [i.strip() for i in x.split(',')]})
    config_file_parser.read(filename)
    return config_file_parser
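A usage sketch for the getlist converter registered above (file and option names are hypothetical):

parser = load_configfile('settings.ini')
# settings.ini might contain:
#   [paths]
#   dirs = /a, /b, /c
dirs = parser.getlist('paths', 'dirs')  # -> ['/a', '/b', '/c']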
Example #24
    def read_token_from_config_file(self, settings):
        """Return the token stored under [creds] in the given settings file."""
        parser = cfg.ConfigParser()  # `cfg` is configparser imported under an alias
        parser.read(settings)
        return parser.get('creds', 'token')
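The settings file this method expects would look roughly like this (the token value is a placeholder):

[creds]
token = <your-api-token>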
Example #25
import os
import configparser

from PyQt5.QtWidgets import (QFileDialog, QLabel, QLineEdit, QSpinBox,
                             QDoubleSpinBox, QCheckBox, QGroupBox, QComboBox,
                             QPushButton)

from libpport import utilities

config = configparser.ConfigParser(strict=False)
config.optionxform = str


def openini(parent, fileName=''):
    parent.tabWidget.setCurrentIndex(0)
    parent.machinePTE.clear()
    if not fileName:
        if os.path.isdir(os.path.expanduser('~/linuxcnc/configs')):
            configsDir = os.path.expanduser('~/linuxcnc/configs')
        else:
            configsDir = os.path.expanduser('~/')
        fileName = QFileDialog.getOpenFileName(
            parent,
            caption="Select Configuration INI File",
            directory=configsDir,
            filter='*.ini',
            options=QFileDialog.DontUseNativeDialog,
        )
        # getOpenFileName returns a (path, selected_filter) tuple, which is
        # always truthy, so test the path element itself.
        if fileName[0]:
            parent.machinePTE.appendPlainText(f'Loading {fileName[0]}')
            iniFile = fileName[0]
        else:
            return  # dialog was cancelled; nothing to load
Example #26
import configparser
import os
import pathlib
import subprocess
import sys


# FUNCS_PATH, WORKER_CONFIG, WORKER_PATH and DEFAULT_WEBHOST_DLL_PATH are
# module-level constants defined elsewhere in this test-support module.
def popen_webhost(*, stdout, stderr, script_root=FUNCS_PATH, port=None):
    testconfig = None
    if WORKER_CONFIG.exists():
        testconfig = configparser.ConfigParser()
        testconfig.read(WORKER_CONFIG)

    dll = os.environ.get('PYAZURE_WEBHOST_DLL')
    if not dll and testconfig and testconfig.has_section('webhost'):
        dll = testconfig['webhost'].get('dll')

    if dll:
        # Paths from environment might contain trailing or leading whitespace.
        dll = dll.strip()

    if not dll:
        dll = DEFAULT_WEBHOST_DLL_PATH

    if not dll or not pathlib.Path(dll).exists():
        raise RuntimeError('\n'.join([
            'Unable to locate Azure Functions Host binary.',
            'Please do one of the following:',
            ' * run the following command from the root folder of the',
            '   project:',
            '',
            f'       $ {sys.executable} setup.py webhost',
            '',
            ' * or download or build the Azure Functions Host and then write',
            '   the full path to WebHost.dll into the `PYAZURE_WEBHOST_DLL`',
            '   environment variable.  Alternatively, you can create the',
            f'   {WORKER_CONFIG.name} file in the root folder of the project',
            '   with the following structure:',
            '',
            '       [webhost]',
            '       dll = /path/Microsoft.Azure.WebJobs.Script.WebHost.dll',
        ]))

    worker_path = os.environ.get('PYAZURE_WORKER_DIR')
    if not worker_path:
        worker_path = WORKER_PATH
    else:
        worker_path = pathlib.Path(worker_path)

    if not worker_path.exists():
        raise RuntimeError(f'Worker path {worker_path} does not exist')

    # Casting to strings is necessary because Popen doesn't like
    # path objects there on Windows.
    extra_env = {
        'AzureWebJobsScriptRoot': str(script_root),
        'workers:config:path': str(worker_path),
        'workers:python:path': str(worker_path / 'python' / 'worker.py'),
        'host:logger:consoleLoggingMode': 'always',
    }

    if testconfig and 'azure' in testconfig:
        st = testconfig['azure'].get('storage_key')
        if st:
            extra_env['AzureWebJobsStorage'] = st

        cosmos = testconfig['azure'].get('cosmosdb_key')
        if cosmos:
            extra_env['AzureWebJobsCosmosDBConnectionString'] = cosmos

    if port is not None:
        extra_env['ASPNETCORE_URLS'] = f'http://*:{port}'

    return subprocess.Popen(['dotnet', str(dll)],
                            cwd=script_root,
                            env={
                                **os.environ,
                                **extra_env,
                            },
                            stdout=stdout,
                            stderr=stderr)
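A hedged usage sketch for popen_webhost (the port and pipe handling are illustrative):

import subprocess

proc = popen_webhost(stdout=subprocess.PIPE, stderr=subprocess.PIPE, port=8080)
try:
    pass  # exercise the host at http://localhost:8080
finally:
    proc.terminate()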
Example #27
    def __init__(self):
        self.config = configparser.ConfigParser()
        # Note: the relative path resolves against the current working
        # directory at run time, not against this file's location.
        self.config.read(os.path.abspath('../example.ini'))
Example #28
def getDefaults():
    # Hardcoded defaults
    dflt = {'system': {'user': 'sks',
                       'group': 'sks',
                       'compress': 'xz',
                       'svcs': ['sks-db', 'sks-recon'],
                       'logfile': '/var/log/sksdump.log',
                       'days': 1,
                       'dumpkeys': 15000},
            'sync': {'throttle': 0},
            'paths': {'basedir': '/var/lib/sks',
                      'destdir': '/srv/http/sks/dumps',
                      'rsync': ('root@mirror.square-r00t.net:' +
                                '/srv/http/sks/dumps'),
                      'sksbin': '/usr/bin/sks'},
            'runtime': {'nodump': None, 'nocompress': None, 'nosync': None}}
    ## Build out the default .ini.
    dflt_b64 = ("""IyBJTVBPUlRBTlQ6IFRoaXMgc2NyaXB0IHVzZXMgY2VydGFpbiBwZXJtaXNz
                   aW9ucyBmdW5jdGlvbnMgdGhhdCByZXF1aXJlIHNvbWUKIyBmb3JldGhvdWdo
                   dC4KIyBZb3UgY2FuIGVpdGhlciBydW4gYXMgcm9vdCwgd2hpY2ggaXMgdGhl
                   ICJlYXN5IiB3YXksIE9SIHlvdSBjYW4gcnVuIGFzIHRoZQojIHNrcyB1c2Vy
                   IChvci4uLiB3aGF0ZXZlciB1c2VyIHlvdXIgU0tTIGluc3RhbmNlIHJ1bnMg
                   YXMpLgojIEl0IGhhcyB0byBiZSBvbmUgb3IgdGhlIG90aGVyOyB5b3UnbGwg
                   U0VSSU9VU0xZIG1lc3MgdGhpbmdzIHVwIG90aGVyd2lzZS4KIyBJZiB5b3Ug
                   cnVuIGFzIHRoZSBza3MgdXNlciwgTUFLRSBTVVJFIHRoZSBmb2xsb3dpbmcg
                   aXMgc2V0IGluIHlvdXIgc3Vkb2VycwojICh3aGVyZSBTS1NVU0VSIGlzIHRo
                   ZSB1c2VybmFtZSBza3MgcnVucyBhcyk6CiMJQ21uZF9BbGlhcyBTS1NDTURT
                   ID0gL3Vzci9iaW4vc3lzdGVtY3RsIHN0YXJ0IHNrcy1kYixcCiMJICAgICAg
                   ICAgICAgICAgICAgICAgL3Vzci9iaW4vc3lzdGVtY3RsIHN0b3Agc2tzLWRi
                   LFwKIyAgICAgICAgICAgICAgICAgICAgICAgIC91c3IvYmluL3N5c3RlbWN0
                   bCBzdGFydCBza3MtcmVjb24sXAojCQkgICAgICAgICAgICAgICAgIC91c3Iv
                   YmluL3N5c3RlbWN0bCBzdG9wIHNrcy1yZWNvbgojCVNLU1VTRVIgQUxMID0g
                   Tk9QQVNTV0Q6IFNLU0NNRFMKCiMgVGhpcyB3YXMgd3JpdHRlbiBmb3Igc3lz
                   dGVtZCBzeXN0ZW1zIG9ubHkuIFR3ZWFraW5nIHdvdWxkIGJlIG5lZWRlZCBm
                   b3IKIyBub24tc3lzdGVtZCBzeXN0ZW1zIChzaW5jZSBldmVyeSBub24tc3lz
                   dGVtZCB1c2VzIHRoZWlyIG93biBpbml0IHN5c3RlbQojIGNhbGxhYmxlcy4u
                   LikKCiMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMj
                   IyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMKCiMgVGhp
                   cyBzZWN0aW9uIGNvbnRyb2xzIHZhcmlvdXMgc3lzdGVtIGNvbmZpZ3VyYXRp
                   b24uCltzeXN0ZW1dCgojIFRoaXMgc2hvdWxkIGJlIHRoZSB1c2VyIFNLUyBy
                   dW5zIGFzLgp1c2VyID0gc2tzCgojIFRoaXMgaXMgdGhlIGdyb3VwIHRoYXQg
                   U0tTIHJ1bnMgYXMuCmdyb3VwID0gc2tzCgojIElmIGVtcHR5LCBkb24ndCBj
                   b21wcmVzcyBkdW1wcy4KIyBJZiBvbmUgb2Y6IHh6LCBneiwgYnoyLCBvciBs
                   cnogKGZvciBscnppcCkgdGhlbiB1c2UgdGhhdCBjb21wcmVzc2lvbiBhbGdv
                   LgojIE5vdGUgdGhhdCBscnppcCByZXF1aXJlcyBleHRyYSBpbnN0YWxsYXRp
                   b24uCmNvbXByZXNzID0geHoKCiMgVGhlc2Ugc2VydmljZXMgd2lsbCBiZSBz
                   dG9wcGVkL3N0YXJ0ZWQsIGluIG9yZGVyLCBiZWZvcmUvYWZ0ZXIgZHVtcHMu
                   IElmIG1vcmUKIyB0aGFuIG9uZSwgc2VwZXJhdGUgYnkgY29tbWFzLgpzdmNz
                   ID0gc2tzLWRiLHNrcy1yZWNvbgoKIyBUaGUgcGF0aCB0byB0aGUgbG9nZmls
                   ZS4KbG9nZmlsZSA9IC92YXIvbG9nL3Nrc2R1bXAubG9nCgojIFRoZSBudW1i
                   ZXIgb2YgZGF5cyBvZiByb3RhdGVkIGtleSBkdW1wcy4gSWYgZW1wdHksIGRv
                   bid0IHJvdGF0ZS4KZGF5cyA9IDEKCiMgSG93IG1hbnkga2V5cyB0byBpbmNs
                   dWRlIGluIGVhY2ggZHVtcCBmaWxlLgpkdW1wa2V5cyA9IDE1MDAwCgoKIyBU
                   aGlzIHNlY3Rpb24gY29udHJvbHMgc3luYyBzZXR0aW5ncy4KW3N5bmNdCgoj
                   IFRoaXMgc2V0dGluZyBpcyB3aGF0IHRoZSBzcGVlZCBzaG91bGQgYmUgdGhy
                   b3R0bGVkIHRvLCBpbiBLaUIvcy4gSWYgZW1wdHkgb3IKIyAwLCBwZXJmb3Jt
                   IG5vIHRocm90dGxpbmcuCnRocm90dGxlID0gMAoKCiMgVGhpcyBzZWN0aW9u
                   IGNvbnRyb2xzIHdoZXJlIHN0dWZmIGdvZXMgYW5kIHdoZXJlIHdlIHNob3Vs
                   ZCBmaW5kIGl0LgpbcGF0aHNdCgojIFdoZXJlIHlvdXIgU0tTIERCIGlzLgpi
                   YXNlZGlyID0gL3Zhci9saWIvc2tzCgojIFRoaXMgaXMgdGhlIGJhc2UgZGly
                   ZWN0b3J5IHdoZXJlIHRoZSBkdW1wcyBzaG91bGQgZ28uCiMgVGhlcmUgd2ls
                   bCBiZSBhIHN1Yi1kaXJlY3RvcnkgY3JlYXRlZCBmb3IgZWFjaCBkYXRlLgpk
                   ZXN0ZGlyID0gL3Nydi9odHRwL3Nrcy9kdW1wcwoKIyBUaGUgcGF0aCBmb3Ig
                   cnN5bmNpbmcgdGhlIGR1bXBzLiBJZiBlbXB0eSwgZG9uJ3QgcnN5bmMuCnJz
                   eW5jID0gcm9vdEBtaXJyb3Iuc3F1YXJlLXIwMHQubmV0Oi9zcnYvaHR0cC9z
                   a3MvZHVtcHMKCiMgVGhlIHBhdGggdG8gdGhlIHNrcyBiaW5hcnkgdG8gdXNl
                   Lgpza3NiaW4gPSAvdXNyL2Jpbi9za3MKCgojIFRoaXMgc2VjdGlvbiBjb250
                   cm9scyBydW50aW1lIG9wdGlvbnMuIFRoZXNlIGNhbiBiZSBvdmVycmlkZGVu
                   IGF0IHRoZQojIGNvbW1hbmRsaW5lLiBUaGV5IHRha2Ugbm8gdmFsdWVzOyB0
                   aGV5J3JlIG1lcmVseSBvcHRpb25zLgpbcnVudGltZV0KCiMgRG9uJ3QgZHVt
                   cCBhbnkga2V5cy4KIyBVc2VmdWwgZm9yIGRlZGljYXRlZCBpbi10cmFuc2l0
                   L3ByZXAgYm94ZXMuCjtub2R1bXAKCiMgRG9uJ3QgY29tcHJlc3MgdGhlIGR1
                   bXBzLCBldmVuIGlmIHdlIGhhdmUgYSBjb21wcmVzc2lvbiBzY2hlbWUgc3Bl
                   Y2lmaWVkIGluCiMgdGhlIFtzeXN0ZW06Y29tcHJlc3NdIHNlY3Rpb246ZGly
                   ZWN0aXZlLgo7bm9jb21wcmVzcwoKIyBEb24ndCBzeW5jIHRvIGFub3RoZXIg
                   c2VydmVyL3BhdGgsIGV2ZW4gaWYgb25lIGlzIHNwZWNpZmllZCBpbiBbcGF0
                   aHM6cnN5bmNdLgo7bm9zeW5j""")
    realcfg = configparser.ConfigParser(defaults=dflt, allow_no_value=True)
    # cfgfile is a module-level path defined elsewhere; on first run, write
    # the embedded default config there before reading it back.
    if not os.path.isfile(cfgfile):
        with open(cfgfile, 'w') as f:
            f.write(base64.b64decode(dflt_b64).decode('utf-8'))
    realcfg.read(cfgfile)
    return realcfg
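The base64 blob is just the annotated default .ini that gets written to cfgfile on first run; to inspect it without running the script, decode the same literal (b64decode ignores the embedded whitespace by default):

import base64

print(base64.b64decode(dflt_b64).decode('utf-8'))  # prints the commented default config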
Example #29
import argparse
import configparser

import config_ml  # project-local config wrapper used below
from ml_seq2seq import seq2seq_train
from tokenizer import Tokenizer
from data_utils import get_training_data

if __name__ == "__main__":

    """
    Main driver program to launch the parallel pipeline for processing 
    github commits. 
    """

    parser = argparse.ArgumentParser(description='cmod')
    parser.add_argument('-c', '--config', help='Config file path', required=True)
    parser.add_argument('-e', '--erase', help='Clean previous run data', action='store_true')

    cfg_parser = configparser.ConfigParser()

    args = parser.parse_args()

    cfg_parser.read(args.config)

    cfg = config_ml.ConfigML(cfg_parser)

    # Iterating a ConfigParser yields section names, including DEFAULT.
    for section_name in cfg_parser:
        print('Section:', section_name)
        section = cfg_parser[section_name]
        for name in section:
            print('  {} = {}'.format(name, section[name]))
        print()

Example #30
    def __parse_settings(self, string):
        """Parse section-less key=value text by prepending a DEFAULT header."""
        config = configparser.ConfigParser()
        config.read_string("[DEFAULT]\n" + string)
        return config.defaults()
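A standalone sketch of the same trick: prepending a [DEFAULT] header lets configparser parse plain key=value text that has no section headers:

import configparser

config = configparser.ConfigParser()
config.read_string("[DEFAULT]\ntimeout = 30\nretries = 2\n")
print(config.defaults())  # str-to-str mapping: timeout='30', retries='2'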