示例#1
0
def pprint_dict(dict_like: dict):
    """Pretty-print a mapping with 4-space indentation."""
    printer = pprint.PrettyPrinter(indent=4)
    printer.pprint(dict_like)
示例#2
0
def weight(reports):
    """Compute word-weight probabilities for each report.

    For every report, builds the vocabulary of unique, non-stopword tokens,
    then for each word computes max-count / total-count of its occurrences
    across users (sprob) and across sections (tprob).

    Parameters
    ----------
    reports : iterable of dict
        Each report maps section keys to ``{'user': ..., 'text': {k: sentence}}``;
        the ``'title'`` key is skipped.

    Returns
    -------
    (sprob, tprob, r_words)
        Dicts keyed by 1-based report index. ``sprob``/``tprob`` map each
        vocabulary word to its max/sum ratio (0 when never seen);
        ``r_words`` maps each sentence key to its cleaned token list.
    """
    stop_words = set(stopwords.words('english'))

    # Characters stripped from every sentence before tokenizing.
    chars_to_remove = ['?', '!', '[', ']', '`', '\'\'', '<', '>', '(', ')', ',', ':']
    rx = '[' + re.escape(''.join(chars_to_remove)) + ']'

    def _clean_tokens(sentence):
        # Remove periods that are not decimal points, strip punctuation,
        # lowercase, then keep only non-stopword tokens.
        sentence = re.sub(r'(?<!\d)\.(?!\d)', '', sentence)
        sentence = re.sub(rx, '', sentence)
        sentence = sentence.lower()
        return [w for w in word_tokenize(sentence) if w not in stop_words]

    # words[i]: unique non-empty tokens over the whole report i.
    words = {}
    for i, report in enumerate(reports, start=1):
        collected = []
        for k1 in report.keys():
            if k1 != 'title':
                for sentence in report[k1]['text'].values():
                    collected.extend(_clean_tokens(sentence))
        words[i] = list(set(filter(None, collected)))

    # s_words[i][user]: tokens written by each user;
    # t_words[i][section]: tokens per section; r_words[i][k2]: tokens per sentence.
    s_words = {}
    t_words = {}
    r_words = {}
    for i, report in enumerate(reports, start=1):
        s_words[i] = {}
        t_words[i] = {}
        r_words[i] = {}
        for k1 in report.keys():
            if k1 != 'title':
                user = report[k1]['user']
                t_words[i][k1] = []
                if user not in s_words[i]:
                    s_words[i][user] = []

                for k2, v in report[k1]['text'].items():
                    temp = _clean_tokens(v)
                    r_words[i][k2] = temp
                    s_words[i][user].extend(list(temp))
                    t_words[i][k1].extend(temp)

    # Replace token lists with frequency distributions.
    for k1 in s_words:
        for k2 in s_words[k1]:
            s_words[k1][k2] = nltk.FreqDist(s_words[k1][k2])

    for k1 in t_words:
        for k2 in t_words[k1]:
            t_words[k1][k2] = nltk.FreqDist(t_words[k1][k2])

    def _max_over_sum(word, dists):
        # Ratio of the largest single count to the total count of `word`
        # across the given frequency distributions; 0 when never seen.
        # (Renamed from the original locals `max`/`sum`, which shadowed builtins.)
        best = 0
        total = 0
        for dist in dists.values():
            if word in dist:
                count = dist[word]
                if count > best:
                    best = count
                total += count
        return best / total if total != 0 else total

    sprob = {}
    tprob = {}
    for r_key in words:
        # BUG FIX: the original iterated range(len(words[r_key]) - 1),
        # silently skipping the last vocabulary word; cover every word.
        sprob[r_key] = {w: _max_over_sum(w, s_words[r_key]) for w in words[r_key]}
        tprob[r_key] = {w: _max_over_sum(w, t_words[r_key]) for w in words[r_key]}

    return sprob, tprob, r_words
示例#3
0
                    datefmt="%m-%d %H:%M:%S")

import os
import sys
import urllib
import pprint
import tarfile
import tensorflow as tf

import datetime
import dateutil.tz
import numpy as np

import scipy.misc

pp = pprint.PrettyPrinter().pprint
logger = logging.getLogger(__name__)


def mprint(matrix, pivot=0.5):
    """Render a 2-D matrix as ASCII art: '#' where a value exceeds *pivot*,
    ' ' otherwise, one row per line. (Python 2 print statement.)"""
    for array in matrix:
        print "".join("#" if i > pivot else " " for i in array)


def show_all_variables():
    """Print every TensorFlow trainable variable with its shape and element count."""
    total_count = 0
    for idx, op in enumerate(tf.trainable_variables()):
        shape = op.get_shape()
        # Number of elements = product of all dimensions.
        count = np.prod(shape)
        print "[%2d] %s %s = %s" % (idx, op.name, shape, count)
        # NOTE(review): total_count is accumulated but never printed or
        # returned in the visible code -- presumably reported after this
        # excerpt; confirm against the full file.
        total_count += int(count)
示例#4
0
def printSam(sam):
    """Serialize the *sam* sample dictionary to a generated Python module
    named ``Samples_<anaVersion>.py``.

    The output file re-creates ``sam`` entry by entry, appends a
    ``fixLocalPaths`` helper that rewrites placeholder paths at import time,
    and backs up any pre-existing output file under a random suffix.

    NOTE(review): relies on module globals defined elsewhere in the file
    (anaVersion, anaType, preamble, rootPath, dateTT, onTheFlyCustomization).
    Python 2 code (print statements).
    """
    pp = pprint.PrettyPrinter()
    # Lines of the generated module, written out at the end.
    toFile = []

    toFile.append('anaVersion="' + anaVersion + '"\n')
    toFile.append('anaType="' + anaType + '"\n')
    toFile.append(preamble + "\n")
    toFile.append('rootPath="' + rootPath(dateTT) + '"\n')

    toFile.append('sam = {}' + "\n")

    # Emit one assignment per sample and per attribute, pretty-printed so the
    # generated file is readable/diffable.
    for s in sorted(sam.keys()):
        toFile.append("\n")
        toFile.append('sam["' + s + '"]={}' + "\n")
        for atr in sam[s]:
            toFile.append('sam["' + s + '"]["' + atr + '"]=' +
                          pp.pformat(sam[s][atr]) + "\n")

    epilogue = onTheFlyCustomization()
    toFile.append(epilogue)

    # Template appended verbatim to the generated module: resolves the
    # XXXTMFPAT / XXXTMFTTree placeholders against the machine-local paths
    # declared in the file pointed to by $SmallXAnaDefFile.
    toFile.append('''def fixLocalPaths(sam):
        import os,imp
        if "SmallXAnaDefFile" not in os.environ:
            print "Please set SmallXAnaDefFile environment variable:"
            print "export SmallXAnaDefFile=FullPathToFile"
            raise Exception("Whooops! SmallXAnaDefFile env var not defined")

        anaDefFile = os.environ["SmallXAnaDefFile"]
        mod_dir, filename = os.path.split(anaDefFile)
        mod, ext = os.path.splitext(filename)
        f, filename, desc = imp.find_module(mod, [mod_dir])
        mod = imp.load_module(mod, f, filename, desc)

        localBasePathPAT = mod.PATbasePATH
        localBasePathTrees = mod.TTreeBasePATH

        for s in sam:
            if "pathPAT" in sam[s]:
                sam[s]["pathPAT"] = sam[s]["pathPAT"].replace("XXXTMFPAT", localBasePathPAT)
            if "pathTrees" in sam[s]:
                sam[s]["pathTrees"] = sam[s]["pathTrees"].replace("XXXTMFTTree", localBasePathTrees)
            #print sam[s]["pathPAT"]
            #print sam[s]["pathTrees"]
        return sam
sam = fixLocalPaths(sam)
''')

    ofile = "Samples_" + anaVersion + ".py"
    #'''
    # Back up an existing output file under a random 6-char suffix before
    # overwriting it.
    if os.path.isfile(ofile):
        import random
        import string
        char_set = string.ascii_uppercase + string.digits
        name = ''.join(random.sample(char_set * 6, 6))
        ofileBak = ofile + "_" + name
        print "BAK:", ofileBak
        os.system("cp " + ofile + " " + ofileBak)
    #'''
    print "Please remember to do diff on new " + ofile + " and the one in python/samples dir"

    outputFile = open("Samples_" + anaVersion + ".py", "w")
    for line in toFile:
        outputFile.write(line)
示例#5
0
 def prettyprint(self):
     """Pretty-print the structure returned by self.geth()."""
     import pprint
     printer = pprint.PrettyPrinter(indent=2)
     printer.pprint(self.geth())
示例#6
0
    def compareOffsets(self):
        """Match source-cluster messages to destination-cluster offsets.

        For the list of tpos recorded from the source cluster, look them up
        in the destination and compare value hashes; if they match, all good.
        If not, iterate over destination records until a match is found
        (where duration is one millisecond, based on the assumption that
        multiple messages have been produced during the same millisecond).

        Returns:
            The translated offsets (TopicPartition list) sorted by partition.

        Raises:
            Exception: if any partition is missing source/destination data,
                or a destination inspection returns zero or multiple results.
        """

        self.logger.info(
            "Comparing offsets between source and destination cluster...")

        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(self._metadata)

        # Check that we have destination cluster offsets and hashes before proceeding - if not, we
        # have incomplete data and should explode into a ball of flames to the sound of a distorted
        # sitar being played backwards.
        counter = 0
        for k in self._metadata.keys():
            if self._metadata[k]['dest_hash'] is None or    \
               self._metadata[k]['dest_offset'] is None or  \
               self._metadata[k]['src_hash'] is None:
                counter += 1

        if counter > 0:
            raise Exception(
                f"{counter} out of {len(self._metadata)} topic partitions have insufficient data. Exiting."
            )

        # First pass: offsets whose hashes already line up.
        translated_offsets, unmatched_offsets = self.findMatchingMessages()

        self.logger.info("Working on unmatched offsets...")

        # Second pass: scan forward through the destination partition until a
        # message whose hash equals the source hash is found.
        messages_found = 0
        for md in unmatched_offsets:
            tpo = md['dest_tpo']
            (starting_offset, target_offset) = self.findOffsetRangeToScan(md)

            for offset in range(starting_offset, target_offset):
                self.logger.info(
                    f"Inspecting destination cluster message at offset {offset}..."
                )
                results = self.inspectTPOMessages(
                    [TopicPartition(tpo.topic, tpo.partition, offset)],
                    cluster="destination")
                if len(results) == 0:
                    raise Exception(
                        "Didn't get any metadata from call to inspectTPOMessages(). This implies we read data from the source cluster, but couldn't inspect any messages in the destination cluster. Stopping."
                    )
                elif len(results) > 1:
                    raise Exception(
                        f"Expecting only one result from call to inspectTPOMessages, but got {len(results)}. Stopping"
                    )
                else:
                    # Get the (only) key from the dict
                    key = next(iter(results))
                    dest_hash = results[key]['dest_hash']
                    dest_tpo = results[key]['dest_tpo']
                    dest_message = results[key]['dest_message']

                    if dest_hash == md['src_hash']:
                        self.logger.info("   FOUND matching record: ")
                        self.logger.info(
                            f"                         source hash was {md['src_hash']}, and"
                        )
                        self.logger.info(
                            f"                         dest_hash is    {dest_hash}"
                        )
                        self.logger.info(
                            f".                        destination     {dest_tpo}"
                        )
                        self._metadata[key]['matched'] = True

                        # Update our metadata to accurately reflect the correct destination message
                        self._metadata[key][
                            'dest_offset'] = dest_message.offset()
                        self._metadata[key]['dest_hash'] = dest_hash
                        self._metadata[key][
                            'dest_timestamp'] = dest_message.timestamp()[1]
                        self._metadata[key]['dest_tpo'] = dest_tpo
                        self._metadata[key]['dest_message'] = dest_message

                        translated_offsets.append(dest_tpo)
                        messages_found += 1

                        # Found it so stop iterating
                        break

        self.logger.info(
            f"Found {messages_found} out of {len(unmatched_offsets)} unmatched objects."
        )
        # Sort the offset map by partition number, which may have become out of
        # order if we needed to read and hash messages to find a hash match
        return sorted(translated_offsets, key=lambda k: k.partition)
示例#7
0
def display(data, variableName='valid_file_blocks'):
    """Pretty-print *data*, optionally preceded by a ``<name>=\\`` header line.

    Parameters
    ----------
    data
        Any object accepted by pprint.
    variableName : str
        Name used in the header line; pass '' (or whitespace only) to
        suppress the header and print the data alone.
    """
    import pprint
    pp = pprint.PrettyPrinter(indent=4, width=120)
    # Idiomatic truthiness check replaces the original `!= ''` comparison.
    if variableName.strip():
        print('%s=\\' % variableName)
    pp.pprint(data)
示例#8
0
'''
dynamoDBstreams functions for WeirdAAL
'''

import boto3
import botocore
import pprint
import os
import sys

# Shared pretty-printer used by this module's listing helpers.
pp = pprint.PrettyPrinter(indent=5, width=80)

# from http://docs.aws.amazon.com/general/latest/gr/rande.html
regions = [
    'us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', 'ca-central-1',
    'eu-central-1', 'eu-west-1', 'eu-west-2', 'ap-northeast-1',
    'ap-northeast-2', 'ap-southeast-1', 'ap-southeast-2'
]
'''
Code to get the AWS_ACCESS_KEY_ID from boto3
'''
# Resolve the active access key through boto3's default credential chain
# (env vars, shared credentials file, instance profile, ...).
session = boto3.Session()
credentials = session.get_credentials()
AWS_ACCESS_KEY_ID = credentials.access_key


def list_dynamodbstreams():
    '''
    Use list_streams function in dynamodbstreams to list available streams
    '''
    # NOTE(review): only the banner print is visible in this excerpt; the
    # actual boto3 list_streams call presumably follows -- confirm against
    # the full file.
    print("### Printing DynamoDBstreams ###")
示例#9
0
def pp(obj):
    """Log *obj* at INFO level, formatted by a wide pretty-printer."""
    import pprint
    # Renamed the local so it no longer shadows this function's own name.
    printer = pprint.PrettyPrinter(indent=1, width=160)
    logger.info(printer.pformat(obj))
示例#10
0
文件: repokid.py 项目: frite/repokid
def rollback_role(account_number, role_name, selection=None, commit=None):
    """Restore a role's inline IAM policies to a cached prior version.

    Args:
        account_number: AWS account that owns the role.
        role_name: name of the role to roll back.
        selection: index into the role's cached ``Policies`` list; when None,
            print a table of available versions and return.
        commit: when falsy, only preview the selected vs. current policies.

    NOTE(review): Python 2 code; relies on module globals defined elsewhere
    (LOGGER, CONFIG, tabulate, get_role_inline_policies, roledata, json).
    """
    role_data = _find_role_in_cache(role_name)
    if not role_data:
        LOGGER.error("Couldn't find role ({}) in cache".format(role_name))
        return

    # no option selected, display a table of options
    if not selection:
        headers = [
            'Number', 'Source', 'Discovered', 'Policy Length',
            'Policy Contents'
        ]
        rows = []
        for index, policies_version in enumerate(role_data['Policies']):
            rows.append([
                index, policies_version['Source'],
                policies_version['Discovered'],
                len(str(policies_version['Policy'])),
                str(policies_version['Policy'])[:50]
            ])
        print tabulate(rows, headers=headers)
        return

    from cloudaux import CloudAux
    conn = CONFIG['connection_iam']
    conn['account_number'] = account_number
    ca = CloudAux(**conn)

    current_policies = get_role_inline_policies(role_data, **conn)

    # Dry-run: show what would be restored, then bail out.
    if selection and not commit:
        pp = pprint.PrettyPrinter()
        print "Will restore the following policies:"
        pp.pprint(role_data['Policies'][int(selection)]['Policy'])
        print "Current policies:"
        pp.pprint(current_policies)
        return

    # if we're restoring from a version with fewer policies than we have now, we need to remove them to
    # complete the restore.  To do so we'll store all the policy names we currently have and remove them
    # from the list as we update.  Any policy names left need to be manually removed
    policies_to_remove = current_policies.keys()

    for policy_name, policy in role_data['Policies'][int(
            selection)]['Policy'].items():
        try:
            LOGGER.info("Pushing cached policy: {}".format(policy_name))
            ca.call('iam.client.put_role_policy',
                    RoleName=role_data['RoleName'],
                    PolicyName=policy_name,
                    PolicyDocument=json.dumps(policy, indent=2,
                                              sort_keys=True))
        except Exception as e:
            LOGGER.error("Unable to push policy {}.  Error: {}".format(
                policy_name, e.message))
        else:
            # remove the policy name if it's in the list
            try:
                policies_to_remove.remove(policy_name)
            except:
                pass

    # Anything still listed exists on the role but not in the restored
    # version, so delete it.
    if policies_to_remove:
        for policy_name in policies_to_remove:
            try:
                ca.call('iam.client.delete_role_policy',
                        RoleName=role_data['RoleName'],
                        PolicyName=policy_name)
            except Exception as e:
                LOGGER.error("Unable to delete policy {}.  Error: {}".format(
                    policy_name, e.message))

    role_data['policies'] = get_role_inline_policies(role_data, **conn) or {}
    roledata.add_new_policy_version(role_data, 'Restore')
    LOGGER.info('Successfully restored selected version of role policies')
示例#11
0
    def test_accepts_all_args(self):
        """Parse every combination of CLI arguments and verify round-tripping.

        For every cardinality r, each combination of known arguments is fed
        to the parser; parsed values must equal the supplied values (or the
        defaults for omitted arguments). --init together with --uuid is
        expected to make argparse exit with status 2. All mismatches are
        collected into `failures` and reported at the end.
        """
        all_test_arguments = cluster_config_command.all_arguments

        default_arg_values = {
            '--region-url': None,
            '--uuid': None,
            '--init': False,
            '--tftp-port': None,
            '--tftp-root': None,
        }

        failures = {}

        # Try all cardinalities of combinations of arguments
        for r in range(len(all_test_arguments) + 1):
            for test_arg_names in combinations(all_test_arguments, r):
                # Fresh random values each combination.
                test_values = {
                    '--region-url': factory.make_simple_http_url(),
                    '--uuid': str(uuid.uuid4()),
                    '--init': '',
                    '--tftp-port': str(factory.pick_port()),
                    '--tftp-root': factory.make_string(),
                }

                # Build a query dictionary for the given combination of args
                args_under_test = []
                for param_name in test_arg_names:
                    args_under_test.append(param_name)
                    # --init is a flag and takes no value.
                    if param_name != '--init':
                        args_under_test.append(test_values[param_name])

                parser = ArgumentParser()
                cluster_config_command.add_arguments(parser)

                # If both init and uuid are passed, argparse will generate
                # a nice ArgumentError exception, which unfortunately,
                # gets caught and sent to exit.
                if '--init' in test_arg_names and '--uuid' in test_arg_names:
                    expected_exception = ExpectedException(SystemExit, '2')
                    with expected_exception, patch('sys.stderr'):
                        parser.parse_known_args(args_under_test)

                else:
                    # Otherwise, parsed args with defaults as usual
                    observed_args = vars(
                        parser.parse_args(args_under_test))

                    expected_args = {}
                    for param_name in all_test_arguments:
                        # '--tftp-port' -> 'tftp_port', matching argparse dest.
                        parsed_param_name = param_name[2:].replace('-', '_')

                        if param_name not in test_arg_names:
                            expected_args[parsed_param_name] = \
                                default_arg_values[param_name]
                        else:
                            expected_args[parsed_param_name] = \
                                observed_args[parsed_param_name]

                    if expected_args != observed_args:
                        failures[str(test_arg_names)] = {
                            'expected_args': expected_args,
                            'observed_args': observed_args,
                        }

        error_message = io.StringIO()
        error_message.write(
            "One or more key / value argument list(s)"
            "passed in the query string (expected_args)"
            "to the API do not match the values in "
            "the returned query string. This "
            "means that some arguments were "
            "dropped / added / changed by the "
            "the function, which is incorrect "
            "behavior. The list of incorrect "
            "arguments is as follows: \n")
        pp = pprint.PrettyPrinter(depth=3, stream=error_message)
        pp.pprint(failures)
        self.assertDictEqual({}, failures, error_message.getvalue())
示例#12
0
 def print_python_map(self):
     """Dump self.python_map via an indented pretty-printer (debug aid)."""
     printer = pprint.PrettyPrinter(indent=2)
     printer.pprint(self.python_map)
示例#13
0
def main():
    """Command-line entry point.

    Parses arguments, builds the requested EVM wrapper (geth or parity,
    docker or local binary), then either runs self-tests, serves the web
    app, or reproduces a single transaction by hash and prints replay
    commands for the saved artefacts.
    """
    description = """
Tool to reproduce on-chain events locally. 
This can run either as a command-line tool, or as a webapp using a built-in flask interface.
    """
    examples = """
Examples

# Reproduce a tx with a local evm binary
python3 reproducer.py --no-docker -g ~/go/src/github.com/ethereum/go-ethereum/build/bin/evm --hash 0xd6d519043d40691a36c9e718e47110309590e6f47084ac0ec00b53718e449fd3 

# Reproduce a tx with a docker evm
python3 reproducer.py -g holiman/gethvm --hash 0xd6d519043d40691a36c9e718e47110309590e6f47084ac0ec00b53718e449fd3

# Start the reproducer webapp using the default geth docker image: 
python3 reproducer.py -w localhost

Unfinished: 

* This does not _quite_ work with parity, yet, because parity does not load the code in genesis for the 'to'
  -account, it expects the code to be given as an argument. 

    """
    parser = argparse.ArgumentParser(
        description=description,
        epilog=examples,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    # Exactly one EVM implementation may be chosen.
    evmchoice = parser.add_mutually_exclusive_group()
    evmchoice.add_argument('-g',
                           '--geth-evm',
                           type=str,
                           help="Geth EVM binary or docker image name",
                           default="holiman/gethvm")
    evmchoice.add_argument('-p',
                           '--parity-evm',
                           type=str,
                           default=None,
                           help="Parity EVM binary or docker image name")

    parser.add_argument(
        "--no-docker",
        action="store_true",
        help="Set to true if using a local binary instead of a docker image")

    # Either serve the webapp or look up a single tx hash, not both.
    web_or_direct = parser.add_mutually_exclusive_group()
    web_or_direct.add_argument('-x',
                               '--hash',
                               type=str,
                               help="Don't run webapp, just lookup hash")
    # The web options only exist when flask (`app`) imported successfully.
    if app:
        web_or_direct.add_argument(
            '-w',
            '--www',
            type=str,
            help="Run webapp on given interface (interface:port)")
        parser.add_argument(
            '-d',
            '--debug',
            action="store_true",
            default=False,
            help=
            "Run flask in debug mode (WARNING: debug on in production is insecure)"
        )

    parser.add_argument('-t',
                        '--test',
                        action="store_true",
                        default=False,
                        help="Dont run webapp, only local tests")

    web3settings = parser.add_argument_group(
        'Web3',
        'Settings about where to fetch information from (default infura)')
    web3settings.add_argument(
        "--web3",
        type=str,
        default="https://mainnet.infura.io/",
        help=
        "Web3 API url to fetch info from (default 'https://mainnet.infura.io/'"
    )

    args = parser.parse_args()

    # end of arg handling

    if args.parity_evm:
        vm = VMUtils.ParityVM(args.parity_evm, not args.no_docker)
    else:
        vm = VMUtils.GethVM(args.geth_evm, not args.no_docker)

    api = utils.getApi(args.web3)

    if args.test:
        artefacts = test(vm, api)
        import pprint
        pprint.PrettyPrinter().pprint(artefacts)
        sys.exit(0)

    if app and args.www:
        if ':' in args.www:
            host, port = args.www.split(':')
            # BUG FIX: the original `port = port` was a no-op that left the
            # port as a string; convert it to int to match the default branch.
            port = int(port)
        else:
            host = args.www
            port = 5000

        app.debug = args.debug
        app.api = api
        app.vm = vm
        app.run(host=host, port=port)

    elif args.hash:
        artefacts, vm_args = reproduce.reproduceTx(args.hash, vm, api)
        saved_files = utils.saveFiles(OUTPUT_DIR, artefacts)

        # Some tricks to get the right command for local replay
        p_gen = saved_files['parity genesis']['name']
        g_gen = saved_files['geth genesis']['name']
        vm_args['genesis'] = "%s/%s" % (OUTPUT_DIR, g_gen)

        print("\nCommand to execute locally (geth):\n")
        print("%s" % " ".join(vm.makeCommand(**vm_args)))
        print("\nWith memory:\n")
        vm_args['memory'] = True
        print("%s" % " ".join(vm.makeCommand(**vm_args)))
        vm_args.pop('json', None)
        vm_args.pop('memory', None)
        vm_args['statdump'] = "true"
        print("\nFor benchmarking:\n")
        print("%s" % " ".join(vm.makeCommand(**vm_args)))

        print("\nFor opviewing:\n")
        print("python3 opviewer.py -f %s/%s" %
              (saved_files['json-trace']['path'],
               saved_files['json-trace']['name']))

        print("\nFor opviewing with sources:\n")
        print(
            "python3 opviewer.py -f %s/%s --web3 '%s' -s path_to_contract_dir -j path_to_solc_combined_json --hash %s"
            % (saved_files['json-trace']['path'],
               saved_files['json-trace']['name'], args.web3, args.hash))

        logger.debug("creating zip archive for artefacts")
        prefix = args.hash[:8]
        output_archive = os.path.join(OUTPUT_DIR, "%s.zip" % prefix)
        # create a list of files to pack with zipFiles
        # BUG FIX: the original iterated `saved_files` directly, yielding the
        # dict's string keys, so v['path'] would raise TypeError; iterate the
        # entry dicts via .values().
        input_files = [(os.path.join(v['path'], v['name']), v['name'])
                       for v in saved_files.values()]
        create_zip_archive(input_files=input_files,
                           output_archive=output_archive)

        print("\nZipped files into %s" % output_archive)

    else:
        parser.print_usage()
示例#14
0
import re
import pprint

pp = pprint.PrettyPrinter(width=1)
wires = {}  # resolved wire name -> integer signal (written by connect())
incomplete = []  # command strings waiting on unresolved inputs (queued by store())
incomproc = False  # presumably: flag for "currently processing the queue" -- confirm usage below
incomiter = 0  # presumably: counter for resolution passes -- confirm usage below


def connect(signal, wire):
    """Drive *wire* with *signal*: store purely-numeric signals immediately,
    queue symbolic expressions for later resolution via store()."""
    global wires
    # BUG FIX: re.match only anchors at the start, so a token like "12ab"
    # would match and then crash int(); fullmatch requires the entire token
    # to be digits.
    if re.fullmatch(r'[0-9]+', str(signal)):
        wires[wire] = int(signal)
        # print("Connecting wire {0} with signal {1}".format(signal,wire))
        # print(wires)
    else:
        store(signal + " -> " + wire)


def isWired(connected):
    """Return whether *connected* already has a resolved signal.

    The original implicitly returned None on the miss path; an explicit
    bool is clearer and backward compatible for truthiness checks.
    """
    return connected in wires


def store(commanded):
    """Queue an unresolved command string for a later resolution pass."""
    incomplete.append(commanded)
    print("Storing {0}".format(commanded))
    # (dead `pass` statement removed)

示例#15
0
#!/usr/bin/env python

import MySQLdb as mdb
import traceback
import sys
import time

from openpyxl import Workbook
from openpyxl import load_workbook

import pprint

from emedUtil import emed_ConnectToEMED

pp = pprint.PrettyPrinter(indent=1, depth=4)


def loadWStoEMED(type, wsdata, dbh):

    # Get a curstor into the database
    cur = dbh.cursor(mdb.cursors.DictCursor)

    wb = load_workbook(filename=wsdata[type]['fname'])
    ws = wb[wsdata[type]['input_sheetname']]

    skip_header = False
    if type == 'App' and ws["A1"].value == "AppName":
        skip_header = True
    if type == 'Trap' and ws["A1"].value == "EventName":
        skip_header = True
    if type == 'Internal' and ws["A1"].value == "EventName":
示例#16
0
 def dump(self):
     """Print the model's initial and transition probability tables.
     (Python 2 print statements.)"""
     pp = pprint.PrettyPrinter(indent=4)
     print 'Initial Prob.'
     pp.pprint(self.initial)
     print 'Transition Prob.'
     pp.pprint(self.trans)
示例#17
0
import tempfile
import logging
import pprint

from eyed3.id3 import Tag, ID3_V2_4
from fuse import FUSE, FuseOSError, Operations, LoggingMixIn  #, fuse_get_context
#import gmusicapi.exceptions
from gmusicapi import Mobileclient as GoogleMusicAPI
#from gmusicapi import Webclient as GoogleMusicWebAPI

# Python 2 trick: reloading sys re-exposes setdefaultencoding, which the
# interpreter normally deletes at startup.
reload(sys)  # Reload does the trick
sys.setdefaultencoding('UTF-8')

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger('gmusicfs')
pp = pprint.PrettyPrinter(indent=4)  # For debug logging

# Filename patterns: albums are "<name> (<year>)", tracks "<NN> - <title>.mp3".
ALBUM_REGEX = '(?P<album>[^/]+) \((?P<year>[0-9]{4})\)'
ALBUM_FORMAT = u'{name} ({year:04d})'

TRACK_REGEX = '(?P<track>(?P<number>[0-9]+) - (?P<title>.*)\.mp3)'
TRACK_FORMAT = '{number:02d} - {name}.mp3'

# Size in bytes of an ID3v1 tag appended at the end of an MP3 file.
ID3V1_TRAILER_SIZE = 128


def formatNames(string_from):
    """Format a name to make it suitable to use as a filename.

    Replaces '/' (the path separator) with '-'.
    """
    # str.replace is simpler and faster than a regex for a fixed substring.
    return string_from.replace('/', '-')

示例#18
0
    def __init__(self, train_data):
        """Build the GHMM: uniform initial probabilities, a fixed transition
        topology, per-state segment-length distributions, and a per-state
        Markov model. All probabilities are stored in log space.

        train_data: dict mapping hidden-state key -> list of training
        segments. (Python 2 code: iteritems / print statements.)
        NOTE(review): MAX_PREV, MACRO_PRINT and Markov are module-level
        names defined elsewhere in the file.
        """
        print 'initializing GHMM model ...'
        self.hidden_states = ['H', 'B', 'OH', 'OB', 'IH', 'IB']
        # Uniform initial distribution over the six states, in log space.
        self.initial_prob = {
            x: math.log(1.0 / 6.0)
            for x in self.hidden_states
        }

        self.emission_prob = {}
        self.transition_prob = {}
        # Every transition starts at log(0) = -inf; only the fixed topology
        # assigned below is permitted.
        for x in self.hidden_states:
            for y in self.hidden_states:
                self.transition_prob[(x, y)] = float("-inf")

        self.transition_prob[('H', 'IH')] = math.log(0.5)
        self.transition_prob[('H', 'OH')] = math.log(0.5)
        self.transition_prob[('B', 'IB')] = math.log(0.5)
        self.transition_prob[('B', 'OB')] = math.log(0.5)
        self.transition_prob[('IH', 'H')] = math.log(1.0)
        self.transition_prob[('OH', 'H')] = math.log(1.0)
        self.transition_prob[('IB', 'B')] = math.log(1.0)
        self.transition_prob[('OB', 'B')] = math.log(1.0)

        # Empirical distribution of segment lengths per hidden state,
        # first as relative frequencies...
        self.length_dist = {}
        for k, v in train_data.iteritems():
            self.length_dist[k] = {}

            for t in v:
                l = len(t)
                if l in self.length_dist[k]:
                    self.length_dist[k][l] += 1.0 / float(len(v))
                else:
                    self.length_dist[k][l] = 1.0 / float(len(v))

        # ...then converted to log space (tiny values clamp to -inf).
        for k, v in self.length_dist.iteritems():
            n_v = {
                a: (math.log(b) if b > 1e-10 else float("-inf"))
                for a, b in v.iteritems()
            }
            self.length_dist[k] = n_v

        # Guarantee every length up to MAX_PREV has an entry.
        for k in self.length_dist:
            for idx in range(1, MAX_PREV + 1):
                if idx not in self.length_dist[k]:
                    self.length_dist[k][idx] = float("-inf")

        self.max_length = {
            k: max(v.keys())
            for k, v in self.length_dist.iteritems()
        }
        if MACRO_PRINT:
            pp = pprint.PrettyPrinter(indent=4)
            print 'length distribution is:'
            pp.pprint(self.length_dist)

        # One Markov model per hidden state, trained on that state's segments.
        self.markov = {}
        for k, v in train_data.iteritems():
            mkv = Markov(v)
            mkv.build()
            self.markov[k] = mkv

        if MACRO_PRINT:
            for k, v in self.markov.iteritems():
                print 'markov model for hidden state %s:' % k
                v.dump()
示例#19
0
class PlayerShell(cmd.Cmd):
    """Interactive command-line music player built on cmd.Cmd.

    Commands run on the main thread; ``preloop`` starts a daemon thread
    (``consumer_player``) that consumes control messages from ``messages``
    and feeds songs from ``playlist`` to the underlying MusicPlayer.
    """

    # BUG FIX: the original intro had a doubled quote: '"?""'.
    intro = 'Dumb Python Music Player. Type "help" or "?" to list commands.\n'
    prompt = '> '
    token = prompt  # bare prompt suffix; chdir() prefixes it with the cwd

    # Internals -- class-level state shared by the shell and the player thread.
    player = MusicPlayer()
    playlist = Queue()                      # songs waiting to be played
    pp = pprint.PrettyPrinter(indent=4)
    running = True                          # cleared by do_exit() to stop consumer_player()
    types = Enum(["PLAY", "PAUSE", "CLOSE", "RESUME"])
    messages = Queue()                      # control messages for the consumer thread
    environment = get_environment()
    current = None                          # file currently playing, or None
    repeat = False                          # when True, finished songs are re-queued

    def preloop(self):
        """Move to the configured music directory and start the player thread."""
        self.chdir(self.environment['music_home'])
        consumer = threading.Thread(target=self.consumer_player)
        consumer.daemon = True  # don't keep the interpreter alive on exit
        consumer.start()

    def do_exit(self, arg):
        """Stop playing, and exit."""
        self.messages.put(self.types.CLOSE)
        self.running = False
        return True  # truthy return value ends cmd.Cmd's command loop

    def do_repeat(self, arg):
        """Toggles playlist repeating"""
        self.repeat = not self.repeat
        val = "on" if self.repeat else "off"
        print("Repeat is now %s" % val)

    def do_cd(self, arg):
        """Change directory to the argument specified"""

        # Windows workaround for "My " directory names ("My Music", ...).
        # BUG FIX: str.replace returns a new string; the original discarded
        # the result, making the hack a no-op.  Assign it back.
        if os.name == "nt":
            if "My " in arg:
                arg = arg.replace("My ", "")
        if os.path.isdir(arg):
            self.chdir(arg)
        else:
            print("%s is not a valid directory." % arg)

    def complete_cd(self, text, line, begidx, endidx):
        """Tab-complete cd arguments against the current directory."""
        return self.complete_helper(text, line, begidx, endidx)

    def do_ls(self, arg):
        """List and print the current directory"""
        call("ls")  # NOTE(review): relies on an external "ls" binary being on PATH

    # playlist options
    # TODO: can we group these better?
    def do_add(self, arg):
        """Adds a song to the playlist specified as the argument to this command"""
        f = get_filename(arg)
        if f:
            self.playlist.put(f)
        else:
            print("Not a valid selection to add to the playlist.")

    def complete_add(self, text, line, begidx, endidx):
        """Tab-complete add arguments against the current directory."""
        return self.complete_helper(text, line, begidx, endidx)

    def do_addall(self, arg):
        """Adds all songs in the current directory to the playlist, not recursively"""
        for song in self.list():
            f = get_filename(song)
            if f:  # skip entries that are not playable files
                self.playlist.put(f)

    def do_clear(self, arg):
        """Clears the entire playlist"""
        self.playlist = Queue()  # replace rather than drain the queue

    def do_show(self, arg):
        """Prints out the current song, if one is playing"""
        if self.current is not None:
            audio = EasyID3(self.current)
            print("%s - %s" % (audio['title'][0], audio['album'][0]))
        else:
            print("There is no song currently playing.")

    def do_showall(self, arg):
        """Prints out the entire playlist"""
        # Queue.queue is the underlying deque; reading it does not consume items.
        for queued in self.playlist.queue:
            audio = EasyID3(queued)
            print("%s - %s" % (audio['title'][0], audio['album'][0]))

    # song options
    # TODO: Is there a logical grouping of functionality here? And if so, how do we do that in the cmd module?

    def do_resume(self, arg):
        """Resume a paused song"""
        self.messages.put(self.types.RESUME)

    def do_pause(self, arg):
        """Pause a currently playing song"""
        self.messages.put(self.types.PAUSE)

    def do_skip(self, arg):
        """Stop the current song and play the next one in the playlist if it exists"""
        # By simply closing, our background thread will start the next song if there is one
        self.messages.put(self.types.CLOSE)

    def do_stop(self, arg):
        """Stop playing the current song and clear playlist"""
        self.messages.put(self.types.CLOSE)
        self.playlist = Queue()

    # helper methods
    def list(self):
        """Return the entries of the current working directory."""
        return os.listdir(".")

    def cwd(self):
        """Return the current working directory."""
        return os.getcwd()

    def chdir(self, dir):
        """Change directory and show the new cwd in the prompt."""
        os.chdir(dir)
        self.prompt = self.cwd() + '\n' + self.token

    def complete_helper(self, text, line, begidx, endidx):
        """Shared tab-completion: match the full argument text (everything
        after the command name) against directory entries."""
        current_directory = self.list()
        if text:
            mline = line.partition(' ')[2]
            offs = len(mline) - len(text)  # chars of the arg already typed before `text`
            return [
                    s[offs:] for s in current_directory
                    if s.startswith(mline)
                ]
        else:
            return current_directory

    def _get_next(self):
        """Start the next queued song; in repeat mode re-queue the one that
        just finished."""
        finished_song = self.current
        song = self.playlist.get()
        self.player.play_song(song)
        self.current = song
        if self.repeat and finished_song is not None:
            self.playlist.put(finished_song)

    def consumer_player(self):
        """Background loop: handle control messages and advance playback."""
        while self.running:
            # interrupt to modify the current song somehow
            if not self.messages.empty():
                _cmd = self.messages.get()

                if _cmd == self.types.PLAY:
                    if not self.playlist.empty():
                        self._get_next()
                elif _cmd == self.types.CLOSE:
                    self.player.close_song()
                    if not self.repeat:
                        self.current = None
                elif _cmd == self.types.PAUSE:
                    self.player.pause_song()
                elif _cmd == self.types.RESUME:
                    self.player.resume_song()

            err_length, buf_length = self.player.length()
            err_position, buf_position = self.player.position()
            # we might be finished playing the current song
            try:
                position = int(buf_position)
                total_time = int(buf_length)
                if position >= total_time:
                    self.player.close_song()
                    if not self.playlist.empty():
                        self._get_next()
            except ValueError:
                # position/length not numeric -> nothing is playing
                if not self.playlist.empty():
                    self._get_next()

            time.sleep(1)  # poll roughly once a second
示例#20
0
class TestResourceGroup(unittest.TestCase):
    """Integration tests for the inventory ResourceGroup gRPC API.

    ``setUpClass`` provisions a throw-away identity domain, a domain owner
    and an owner token; per-test helpers create projects / project groups,
    and ``tearDown`` deletes everything each test created.
    """

    config = utils.load_yaml_from_file(
        os.environ.get('SPACEONE_TEST_CONFIG_FILE', './config.yml'))

    pp = pprint.PrettyPrinter(indent=4)
    identity_v1 = None   # identity-service gRPC client (set in setUpClass)
    inventory_v1 = None  # inventory-service gRPC client (set in setUpClass)
    owner_id = None      # generated domain-owner id
    owner_pw = None      # generated domain-owner password
    owner_token = None   # access token issued for the owner

    @classmethod
    def setUpClass(cls):
        """Create gRPC clients and the test domain/owner/token fixtures."""
        super(TestResourceGroup, cls).setUpClass()
        endpoints = cls.config.get('ENDPOINTS', {})

        cls.identity_v1 = pygrpc.client(endpoint=endpoints.get('identity',
                                                               {}).get('v1'),
                                        version='v1',
                                        ssl_enabled=True)
        cls.inventory_v1 = pygrpc.client(endpoint=endpoints.get(
            'inventory', {}).get('v1'),
                                         version='v1')

        cls._create_domain()
        cls._create_domain_owner()
        cls._issue_owner_token()

    @classmethod
    def tearDownClass(cls):
        """Delete the owner and domain created in setUpClass."""
        super(TestResourceGroup, cls).tearDownClass()
        cls.identity_v1.DomainOwner.delete(
            {
                'domain_id': cls.domain.domain_id,
                'owner_id': cls.owner_id
            },
            metadata=(('token', cls.owner_token), ))

        cls.identity_v1.Domain.delete({'domain_id': cls.domain.domain_id},
                                      metadata=(('token', cls.owner_token), ))

    @classmethod
    def _create_domain(cls):
        """Create a randomly-named test domain; stored as ``cls.domain``."""
        name = utils.random_string()
        params = {'name': name}
        cls.domain = cls.identity_v1.Domain.create(params)

    @classmethod
    def _create_domain_owner(cls):
        """Create an owner account for the test domain."""
        cls.owner_id = utils.random_string() + '@mz.co.kr'
        cls.owner_pw = utils.generate_password()

        params = {
            'owner_id': cls.owner_id,
            'password': cls.owner_pw,
            'domain_id': cls.domain.domain_id
        }

        owner = cls.identity_v1.DomainOwner.create(params)
        cls.domain_owner = owner

    @classmethod
    def _issue_owner_token(cls):
        """Issue an access token for the domain owner."""
        token_param = {
            'user_type': 'DOMAIN_OWNER',
            'user_id': cls.owner_id,
            'credentials': {
                'password': cls.owner_pw
            },
            'domain_id': cls.domain.domain_id
        }

        issue_token = cls.identity_v1.Token.issue(token_param)
        cls.owner_token = issue_token.access_token

    def _create_project_group(self, name=None):
        """Create a project group (random name unless given); tracked in
        ``self.project_groups`` for tearDown cleanup."""
        if name is None:
            name = 'ProjectGroup-' + utils.random_string()

        params = {'name': name, 'domain_id': self.domain.domain_id}

        self.project_group = self.identity_v1.ProjectGroup.create(
            params, metadata=(('token', self.owner_token), ))

        self.project_groups.append(self.project_group)
        self.assertEqual(self.project_group.name, params['name'])

    def _create_project(self, project_group_id, name=None):
        """Create a project under the given group; tracked in
        ``self.projects`` for tearDown cleanup."""
        if name is None:
            name = 'Project-' + utils.random_string()

        params = {
            'name': name,
            'project_group_id': project_group_id,
            'domain_id': self.domain.domain_id
        }

        self.project = self.identity_v1.Project.create(
            params, metadata=(('token', self.owner_token), ))

        self.projects.append(self.project)
        self.assertEqual(self.project.name, params['name'])

    def _print_data(self, message, description=None):
        """Pretty-print a protobuf message (with optional caption)."""
        print()
        if description:
            print(f'[ {description} ]')

        self.pp.pprint(MessageToDict(message,
                                     preserving_proto_field_name=True))

    def setUp(self):
        """Reset the per-test lists of created resources."""
        self.resource_groups = []
        self.resource_group = None
        self.projects = []
        self.project = None
        self.project_groups = []
        self.project_group = None

    def tearDown(self):
        """Delete every resource group, project and project group this
        test created (in that order, so references are released first)."""
        for resource_Group in self.resource_groups:
            self.inventory_v1.ResourceGroup.delete(
                {
                    'resource_group_id': resource_Group.resource_group_id,
                    'domain_id': self.domain.domain_id
                },
                metadata=(('token', self.owner_token), ))
            print(
                f'>> delete resource group: {resource_Group.name} ({resource_Group.resource_group_id})'
            )

        for project in self.projects:
            self.identity_v1.Project.delete(
                {
                    'project_id': project.project_id,
                    'domain_id': self.domain.domain_id
                },
                metadata=(('token', self.owner_token), ))
            print(f'>> delete project: {project.name} ({project.project_id})')

        for project_group in self.project_groups:
            self.identity_v1.ProjectGroup.delete(
                {
                    'project_group_id': project_group.project_group_id,
                    'domain_id': self.domain.domain_id
                },
                metadata=(('token', self.owner_token), ))
            print(
                f'>> delete project group: {project_group.name} ({project_group.project_group_id})'
            )

    def test_create_resource_group(self, name=None, project_create=False):
        """ Create Resource Group
        """

        if not name:
            name = utils.random_string()

        # Two resource definitions: a Server filter and a CloudService
        # filter expressed as a resource-type query string.
        params = {
            'name':
            name,
            'resources': [
                {
                    'resource_type':
                    'inventory.Server',
                    'filter': [
                        {
                            'k': 'data.compute.aws_tags.Schedule',
                            'v': 'abcde',
                            'o': 'eq'
                        },
                        {
                            'k': 'data.compute.aws_tags.Value',
                            'v': ['bbbbb'],
                            'o': 'eq'
                        },
                        # {'k': 'data.compute.aws_tags.Key', 'v': 'Policy', 'o': 'eq'},
                        # {'k': 'data.compute.aws_tags.Value', 'v': 'N', 'o': 'eq'}
                    ],
                    'keyword':
                    'aa bb cc'
                },
                {
                    'resource_type':
                    'CloudService?provider=aws&cloud_service_group=DynamoDB&cloud_service_type=Table',
                    'filter': [{
                        'k': 'data.compute.aws_tags.Schedule',
                        'v': 'Test',
                        'o': 'eq'
                    }, {
                        'k': 'data.compute.aws_tags.Value',
                        'v': 'aaa',
                        'o': 'eq'
                    }, {
                        'k': 'data.compute.aws_tags.Key',
                        'v': 'Policy',
                        'o': 'eq'
                    }, {
                        'k': 'data.compute.aws_tags.Value',
                        'v': 'N',
                        'o': 'eq'
                    }]
                },
            ],
            'options': {
                'raw_filter': 'aaa.bbb.ccc'
            },
            'domain_id':
            self.domain.domain_id
        }

        if project_create:
            self._create_project_group()
            self._create_project(self.project_group.project_group_id)

            params.update({'project_id': self.project.project_id})

        self.resource_group = self.inventory_v1.ResourceGroup.create(
            params, metadata=(('token', self.owner_token), ))

        self._print_data(self.resource_group, 'test_create_resource_group')

        self.resource_groups.append(self.resource_group)
        self.assertEqual(self.resource_group.name, name)

    def test_update_resource_group_name(self):
        """Update: renaming a resource group changes its name."""
        self.test_create_resource_group()

        name = utils.random_string()
        param = {
            'resource_group_id': self.resource_group.resource_group_id,
            'name': name,
            'domain_id': self.domain.domain_id,
        }
        self.resource_group = self.inventory_v1.ResourceGroup.update(
            param, metadata=(('token', self.owner_token), ))
        self.assertEqual(self.resource_group.name, name)

    def test_update_resource_group_resource(self):
        """Update: replacing the resources list is reflected in the result."""
        self.test_create_resource_group()
        update_resource = [
            {
                'resource_type': 'inventory.Server',
                'filter': [
                    {
                        'k': 'data.compute.xxxx',
                        'v': 'abcde',
                        'o': 'eq'
                    },
                ],
                'keyword': 'xx yy zz'
            },
        ]

        param = {
            'resource_group_id': self.resource_group.resource_group_id,
            'resources': update_resource,
            'domain_id': self.domain.domain_id,
        }
        self.resource_group = self.inventory_v1.ResourceGroup.update(
            param, metadata=(('token', self.owner_token), ))

        self._print_data(self.resource_group,
                         'test_update_resource_group_resource')

        self.assertEqual(len(self.resource_group.resources),
                         len(update_resource))

    def test_update_resource_group_project(self):
        """Update: assigning a project sets project_id."""
        self.test_create_resource_group()

        self._create_project_group()
        self._create_project(
            project_group_id=self.project_group.project_group_id)

        param = {
            'resource_group_id': self.resource_group.resource_group_id,
            'project_id': self.project.project_id,
            'domain_id': self.domain.domain_id,
        }

        self.resource_group = self.inventory_v1.ResourceGroup.update(
            param, metadata=(('token', self.owner_token), ))

        self.assertEqual(self.resource_group.project_id,
                         self.project.project_id)

    def test_update_resource_group_options(self):
        """Update: the options map is replaced wholesale."""
        self.test_create_resource_group()

        options = {
            utils.random_string(): utils.random_string(),
            utils.random_string(): utils.random_string()
        }
        param = {
            'resource_group_id': self.resource_group.resource_group_id,
            'options': options,
            'domain_id': self.domain.domain_id,
        }
        self.resource_group = self.inventory_v1.ResourceGroup.update(
            param, metadata=(('token', self.owner_token), ))

        self._print_data(self.resource_group,
                         'test_update_resource_group_options')

        self.assertEqual(MessageToDict(self.resource_group.options), options)

    def test_update_resource_group_tags(self):
        """Update: the tags map is replaced wholesale."""
        self.test_create_resource_group()

        tags = {
            utils.random_string(): utils.random_string(),
            utils.random_string(): utils.random_string()
        }
        param = {
            'resource_group_id': self.resource_group.resource_group_id,
            'tags': tags,
            'domain_id': self.domain.domain_id
        }
        self.resource_group = self.inventory_v1.ResourceGroup.update(
            param, metadata=(('token', self.owner_token), ))
        resource_group_data = MessageToDict(self.resource_group)
        self.assertEqual(resource_group_data['tags'], tags)

    def test_update_resource_group_release_project(self):
        """Update: release_project clears the project assignment."""
        self.test_create_resource_group(project_create=True)

        param = {
            'resource_group_id': self.resource_group.resource_group_id,
            'release_project': True,
            'domain_id': self.domain.domain_id,
        }
        self.resource_group = self.inventory_v1.ResourceGroup.update(
            param, metadata=(('token', self.owner_token), ))
        self.assertEqual(self.resource_group.project_id, '')

    def test_get_resource_group(self):
        """Get: fetching by id returns the created group."""
        name = 'test-resource_group'
        self.test_create_resource_group(name)

        param = {
            'resource_group_id': self.resource_group.resource_group_id,
            'domain_id': self.domain.domain_id
        }
        self.resource_group = self.inventory_v1.ResourceGroup.get(
            param, metadata=(('token', self.owner_token), ))
        self.assertEqual(self.resource_group.name, name)

    def test_list_resource_group_id(self):
        """List: filtering by one id matches exactly one group."""
        self.test_create_resource_group(name='test-xxx')
        self.test_create_resource_group(name='test-yyy')

        param = {
            'resource_group_id': self.resource_group.resource_group_id,
            'domain_id': self.domain.domain_id
        }

        resource_groups = self.inventory_v1.ResourceGroup.list(
            param, metadata=(('token', self.owner_token), ))

        self.assertEqual(1, resource_groups.total_count)

    def test_list_resource_group_name(self):
        """List: filtering by name matches exactly one group."""
        self.test_create_resource_group(name='test-xxx')
        self.test_create_resource_group(name='test-yyy')

        param = {'name': 'test-xxx', 'domain_id': self.domain.domain_id}

        resource_groups = self.inventory_v1.ResourceGroup.list(
            param, metadata=(('token', self.owner_token), ))

        self.assertEqual(1, resource_groups.total_count)

    def test_list_query(self):
        """List: a project_id `in` query matches only the two groups
        that were created with projects."""
        self.test_create_resource_group(name='test-xxx')
        self.test_create_resource_group(name='test-yyy', project_create=True)
        self.test_create_resource_group(name='test-yyy', project_create=True)
        self.test_create_resource_group(name='test-xxx')
        self.test_create_resource_group(name='test-xxx')

        param = {
            'domain_id': self.domain.domain_id,
            'query': {
                'filter': [{
                    'k':
                    'project_id',
                    'v':
                    list(map(lambda project: project.project_id,
                             self.projects)),
                    'o':
                    'in'
                }]
            }
        }

        resource_groups = self.inventory_v1.ResourceGroup.list(
            param, metadata=(('token', self.owner_token), ))
        self.assertEqual(2, resource_groups.total_count)

    def test_stat_resource_group(self):
        """Stat: group by resource_group_id, count, sort descending."""
        self.test_list_query()

        params = {
            'domain_id': self.domain.domain_id,
            'query': {
                'aggregate': [{
                    'group': {
                        'keys': [{
                            'key': 'resource_group_id',
                            'name': 'Id'
                        }],
                        'fields': [{
                            'operator': 'count',
                            'name': 'Count'
                        }]
                    }
                }, {
                    'sort': {
                        'key': 'Count',
                        'desc': True
                    }
                }]
            }
        }

        result = self.inventory_v1.ResourceGroup.stat(
            params, metadata=(('token', self.owner_token), ))

        print(result)
示例#21
0
import os
from travis import get_job_log
import glob
import re
import random
import time
import concurrent.futures
import shutil
from concurrent.futures import ThreadPoolExecutor, wait, FIRST_COMPLETED, ALL_COMPLETED
from multiprocessing import Lock, Process, Queue, current_process, cpu_count

import queue
import sys
from log_retriever import read_job_log, dump_job_log, joblog
import gradle_log_parser, yarn_log_parser, maven_log_parser, grunt_log_parser, mocha_log_parser
pp = pprint.PrettyPrinter(depth=6)

#Regex
EXCEPTION_REGEX = "\.([A-Za-z0-9]+)Exception(\W{1,})"
ERRORS_CLASSES_REGEX = "\.([A-Za-z0-9]+)Error(\W{1,})"
PULL_REQUEST_OPEN_CANCELING_BUILD = "branch with open pull request, canceling the build"
BUILD_FAILURE_WHAT_WENT_WRONG = "\* What went wrong:\\r\\nExecution failed for task (.*)"

#Constants
OFFSET = 0
CSV_FOLDER = "csv"
JOBS_CSV = "csv/allJobs.csv"
LIMIT = 100
DEST_FOLDER = "../logs"
JOB_LOG_METRICS_COLUMNS = ["job_id", "build_target","build_tool", "build_canceled_open_pr_on_branch"\
, "errors", "failures", "suspected_words", "warnings", "skipped_words", "lines", "words",\
示例#22
0
# limitations under the License.

import os
import sys
import pprint
import argparse

import lib.logger as logger
from lib.config import Config
from lib.switch import SwitchFactory
from lib.switch_exception import SwitchException
from lib.genesis import GEN_PATH
# from write_switch_memory import WriteSwitchMemory

# Absolute directory containing this script (for resolving sibling files).
FILE_PATH = os.path.dirname(os.path.abspath(__file__))
# Module-wide pretty-printer; the 120-column width suits nested config dumps.
PP = pprint.PrettyPrinter(indent=1, width=120)


class Tree(dict):
    """Instantiates a nested dictionary which allows assignment to arbitrary
    depths.

    Missing keys are auto-vivified: reading ``t[a][b]`` creates and stores
    intermediate ``Tree`` nodes, so ``t[a][b][c] = v`` works directly.
    """
    def __getitem__(self, key):
        # EAFP: one hash probe instead of the original `key in self`
        # followed by self.get(key) (two probes on every present key).
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            # Absent key: create, store, and return a new subtree.
            return self.setdefault(key, Tree())


def _get_port_chan_list():
    """
    Args:
示例#23
0
文件: print.py 项目: larean/obsinfo
def _print_summary_other(instance):
    """ Print summary information about a generic information file """
    pp = pprint.PrettyPrinter(indent=1, depth=2, compact=True)
    pp.pprint(instance)
示例#24
0
import json
import pprint

# Load the scraped results; json.load reads from a file object while
# json.loads parses a string.
with open('results.json', 'r') as file:
    data = json.load(file)
    # data = json.loads("afsd")  #Differece b/w loads and load is load takes input as file and loads as string

printer = pprint.PrettyPrinter()
# printer.pprint(data)

# NOTE(review): assumes the key exists; language is None otherwise and the
# .get("name") call below would raise AttributeError -- confirm the data.
language = data.get('2015ugcs089')
# printer.pprint(language)
printer.pprint(language.get("name"))

# ⚡CP:(C⭐C++⭐Algo⭐DS)⚡WebDev:(HTML⭐CSS⭐JS⭐Bootstrap⭐ReactJs⭐Redux⭐GraphQL)
# ⚡AppDev⚡Databases:(Firebase All Sections⭐ SQL⭐MongoDB)⚡Web Scraping(Scrapy&Selenium)

# 💡📍

# ⚡CP:(C⭐C++⭐Algo⭐DS)⚡WebDev:(HTML⭐CSS⭐JS⭐Bootstrap⭐ReactJs⭐Redux⭐GraphQL)
# ⚡AppDev⚡Databases:(Firebase All Sections⭐SQL⭐MongoDB)⚡Web Scraping(Scrapy&Selenium)

# ⚡CP:(C⭐C++⭐Algo⭐DS)⚡WebDev:(HTML⭐CSS⭐JS⭐Bootstrap⭐ReactJs⭐Redux⭐GraphQL)
# ⚡AppDev⚡Databases: (Firebase All Sections⭐SQL ⭐MongoDB)⚡Web Scraping (Scrapy&Selenium)

# BUG FIX: the two lines below were bare (uncommented) text in the source
# and made the whole module a SyntaxError; kept here as comments.
# ⚡CP:(C|C++|Algo|DS)⚡WebDev:(HTML|CSS|JS|Bootstrap|ReactJs|Redux|GraphQL)
# ⚡AppDev⚡Databases: (Firebase All Sections|SQL |MongoDB)⚡Web Scraping (Scrapy&Selenium)
示例#25
0
        index = string.find(substring, index + 1)
        if index == -1:
            break  # All occurrences have been found
        indices.append(index)
    return indices


agent_name = input("Enter the name of the agent you want to load-->")
# Paths to the agent's pickled intent/entity definitions.
pickle_filename4intents = str("./Agents/" + agent_name + "/" + agent_name +
                              "_intents.pickle")
pickle_filename4entities = str("./Agents/" + agent_name + "/" + agent_name +
                               "_entities.pickle")
#print(pickle_filename4entities)
# BUG FIX: the original opened both pickles without ever closing them (the
# first handle leaked when `pickle_in` was rebound); `with` closes each
# file deterministically.
with open(pickle_filename4intents, "rb") as pickle_in:
    agent_intents = pickle.load(pickle_in)
with open(pickle_filename4entities, "rb") as pickle_in:
    agent_entities = pickle.load(pickle_in)
# One PrettyPrinter is enough -- the original constructed two identical ones.
pp = pprint.PrettyPrinter(indent=2, depth=6)
#pp.pprint(agent_intents)
pp.pprint(agent_entities)
for k, v in agent_intents.items():

    print(agent_intents[k]['training_phrases'])
    dataMap = {}
    dataMap["intent_name"] = k
    dataMap["conversations"] = []
    for phrase in agent_intents[k]['training_phrases']:
        indices = find_substring('|', phrase)
        if len(indices) > 1:
            entity = []
示例#26
0
def check_circuit( circuit_def , debug = False ):
    '''

    Parameters:
    ------------------
        circuit_def : list
            Structure
            [  [ module name(string), out_list, in_list, internal_wiredef, internal_vars, lines ], [...], ... ]
    
    Returns:
    ---------------------
        result : bool
            It resurns True/False when check is passed/not passed.
    '''


    def eval_expr(dVar, arg):
        """Evaluate a parsed expression against the variable table dVar.

        An expression is an int literal, a variable name (string), or a
        3-tuple/list (op, lhs, rhs).  '/' is integer division; comparisons
        yield 1/0; '+=' and '=' mutate dVar and return the stored value.
        Unknown operators are reported on stderr and evaluate to 0.
        """
        if isinstance(arg, int):
            return arg

        if isinstance(arg, str):
            return dVar[arg]

        if len(arg) == 3:
            op, lhs, rhs = arg[0], arg[1], arg[2]

            # Mutating operators: lhs is a variable name, not an expression.
            if op == '+=':
                dVar[lhs] += eval_expr(dVar, rhs)
                return dVar[lhs]
            if op == '=':
                stored = eval_expr(dVar, rhs)
                dVar[lhs] = stored
                return stored

            # Pure binary operators, dispatched through a table.
            binops = {
                '+':  lambda a, b: a + b,
                '-':  lambda a, b: a - b,
                '*':  lambda a, b: a * b,
                '/':  lambda a, b: a // b,
                '==': lambda a, b: 1 if a == b else 0,
                '!=': lambda a, b: 0 if a == b else 1,
                '<':  lambda a, b: 1 if a < b else 0,
                '>':  lambda a, b: 1 if a > b else 0,
            }
            if op in binops:
                # Evaluate operands left-to-right, as the original did.
                left = eval_expr(dVar, lhs)
                right = eval_expr(dVar, rhs)
                return binops[op](left, right)

            print('Error: unknown operator "{}" appears in eval_expr()'.format(op), file=sys.stderr)

        return 0

    def update_used( lUsed, signal, dVar ):
        """Record every (name, bit) covered by *signal* in lUsed.

        signal is (name, start_expr, end_expr); both bound expressions are
        evaluated with eval_expr and may come in either order.
        Returns (True, bit_width).
        """
        bound_a = eval_expr(dVar, signal[1])
        bound_b = eval_expr(dVar, signal[2])
        lo, hi = min(bound_a, bound_b), max(bound_a, bound_b)

        lUsed.extend((signal[0], bit) for bit in range(lo, hi + 1))

        return True, hi - lo + 1

    def check_lines(dModuleIn, dModuleOut, lines, dVar):
        """Validate one list of parsed body lines (for / assign / gate).

        dModuleIn and dModuleOut map module/gate names to their lists of
        input/output bit-widths; dVar holds variable values for eval_expr.
        Each line is [gate, instance_name, outputs, inputs].  Returns
        (ok, used_inputs, used_outputs), where the used lists accumulate
        (signal_name, bit_index) tuples even on failure, so callers can
        still report usage.
        """

        lUsedInput  = []
        lUsedOutput = []
        sUsedModuleName = set()  # instance names seen so far; must be unique

        for line in lines:
            gate    = line[0]
            inst    = line[1]
            loutput = line[2]
            linput  = line[3]


            if gate == 'for':
                if inst in sUsedModuleName:
                    print('Error: instance name "{}" is duplicated.'.format(inst))
                    return False, lUsedInput, lUsedOutput

                sUsedModuleName.add( inst )

                # Recurse through the for-body and fold its usage in.
                result, lusedi, lusedo = check_for(dModuleIn, dModuleOut, line, dVar)

                lUsedInput  += lusedi
                lUsedOutput += lusedo

                if result != True :
                    print('Error: failed in for statement "{}" '.format(inst))
                    return False, lUsedInput, lUsedOutput
            
            elif gate == 'assign':
                # 'assign' wires exactly one 1-bit input to one 1-bit output.
                resulti, input_width  = update_used(lUsedInput,  linput[0], dVar )
                resulto, output_width = update_used(lUsedOutput, loutput[0], dVar )

                if not(resulti == True and resulto == True and input_width == output_width and input_width == 1):
                    if input_width != output_width:
                        print('Error: bit width is inconsistent in "assign"', file=sys.stderr)
                    if input_width != 1:
                        print('Error: "assign" of multiple bits is not supported currently', file=sys.stderr)
                
                    return False, lUsedInput, lUsedOutput
            else:
                # Ordinary gate / sub-module instantiation.
                if not(gate in dModuleIn) or not(gate in dModuleOut):
                    print('Error: Unknown module or gate "{}" is used'.format(gate))
                    return False, lUsedInput, lUsedOutput

                if inst in sUsedModuleName:
                    print('Error: instance name "{}" is duplicated.'.format(inst))
                    return False, lUsedInput, lUsedOutput

                sUsedModuleName.add( inst )

                # Arity must match the module's declared port lists.
                ninput  = len( dModuleIn[gate] )
                noutput = len( dModuleOut[gate] )
                if ninput != len(linput) :
                    print('Error: the number of inputs is not correct in "{}" (instance "{}")'.format(gate, inst))
                    return False, lUsedInput, lUsedOutput
                if noutput != len(loutput) :
                    print('Error: the number of outputs is not correct in "{}" (instance "{}")'.format(gate, inst))
                    return False, lUsedInput, lUsedOutput
                
                # Per-port bit-width check for the inputs.
                for i in range(0, ninput):
                    if len( linput[i] ) == 2 :
                        # gate input xxx@xxx
                        result, width  = update_used(lUsedInput,  linput[i][0], dVar )
                    else:
                        result, width  = update_used(lUsedInput,  linput[i], dVar )

                    if result != True:
                        return False, lUsedInput, lUsedOutput

                    if dModuleIn[gate][i] != width :
                        print('Error: input bit-width is not correct in "{}" (instance "{}")'.format(gate, inst))
                        return False, lUsedInput, lUsedOutput

                # Per-port bit-width check for the outputs.
                for i in range(0, noutput):
                    result, width  = update_used(lUsedOutput,  loutput[i], dVar )

                    if result != True:
                        return False, lUsedInput, lUsedOutput

                    if dModuleOut[gate][i] != width :
                        print('Error: output bit-width is not correct in "{}" (instance "{}")'.format(gate, inst))
                        return False, lUsedInput, lUsedOutput

            

        return True, lUsedInput, lUsedOutput


    def check_for(dModuleIn, dModuleOut, for_body, dVar):
        """Execute a parsed for-statement, re-validating its body each pass.

        for_body is ['for', instance_name, [init, cond, step], body_lines].
        The init/cond/step expressions run through eval_expr against dVar,
        and the body is checked once per iteration so that index-dependent
        bit ranges are validated for every loop value.  Returns
        (ok, used_inputs, used_outputs).
        """
        lUsedInput  = []
        lUsedOutput = []

        inst_name    = for_body[1]
        forCondition = for_body[2]
        lbody        = for_body[3]

        if( not( isinstance(forCondition, list) ) or len(forCondition) != 3 ):
            # BUG FIX: the original formatted this message with the
            # undefined name `instanceName`, raising NameError whenever
            # this error path was reached.
            print('Error: for statement "{}" is not correct'.format(inst_name), file=sys.stderr)
            return False, lUsedInput, lUsedOutput

        eval_expr( dVar, forCondition[0] )  # init expression
        loop = 0

        while loop < 1000:  # hard cap guards against non-terminating loops
            if eval_expr( dVar, forCondition[1] ) == 0:
                return True, lUsedInput, lUsedOutput

            result, lusedi, lusedo = check_lines(dModuleIn, dModuleOut, lbody, dVar)

            lUsedInput  += lusedi
            lUsedOutput += lusedo

            if result != True :
                print('Error: failed in for statement "{}" '.format(inst_name))
                return False, lUsedInput, lUsedOutput

            loop = loop + 1
            eval_expr( dVar, forCondition[2] )  # step expression

        print('Error: for statement "{}" runs over {} times.'.format(inst_name, loop), file=sys.stderr)

        return False, lUsedInput, lUsedOutput


    def check_module( dModuleIn, dModuleOut, module_def):
        """Validate one parsed module definition.

        Checks that every output/wire bit is driven exactly once, warns about
        defined-but-unused signals, and rejects the use of undefined names.
        Returns True on success, False on the first error.
        """
        # BUG FIX: the original body read the enclosing loop variable 'mod'
        # instead of the 'module_def' parameter; it only worked because the
        # caller happened to pass the current 'mod'.  Use the parameter.
        mod_name = module_def[0]
        loutputs = module_def[1]
        linputs  = module_def[2]
        lwires   = module_def[3]
        lvars    = module_def[4]
        lbody    = module_def[5]

        # If the inputs are (name, vector) pairs, keep just the names.
        if len(linputs) > 0 and len(linputs[0]) == 2:
            linputs = [ inp for (inp, vect) in module_def[2] ]


        # All loop variables start at 0.
        dVar = {}
        for var in lvars:
            dVar[ var ] = 0
        
        result, lusedi, lusedo = check_lines(dModuleIn, dModuleOut, lbody, dVar)

        # A signal appearing more than once in the driven list has multiple drivers.
        for i in range(0, len(lusedo)):
            if lusedo[i] in lusedo[i+1:]:
                print('Error: "{}" has multiple drivers in module "{}".'.format(lusedo[i], mod_name), file=sys.stderr)
                return False


        # Expand each wire declaration (name, start, end) into per-bit tuples
        # and warn about bits that are never driven or never read.
        defined_wire = []
        for i in range(0, len(lwires)):
            start = eval_expr(dVar, lwires[i][1])
            end   = eval_expr(dVar, lwires[i][2])
            for j in range( min(start, end), max(start, end)+1 ):
                defined_wire.append( (lwires[i][0], j) )
                if not( (lwires[i][0], j) in lusedo ):
                    print('Warning: "{}[{}]" is not driven in module "{}".'.format(lwires[i][0], j, mod_name), file=sys.stderr)
                if not( (lwires[i][0], j) in lusedi ):
                    print('Warning: "{}[{}]" is defined but not used in module "{}".'.format(lwires[i][0], j, mod_name), file=sys.stderr)

        # Every declared output bit must be driven (error, not just a warning).
        defined_output = []
        for i in range(0, len(loutputs)):
            start = eval_expr(dVar, loutputs[i][1])
            end   = eval_expr(dVar, loutputs[i][2])
            for j in range( min(start, end), max(start, end)+1 ):
                defined_output.append( (loutputs[i][0], j) )
                if not( (loutputs[i][0], j) in lusedo ):
                    print('Error: "{}[{}]" is not driven in module "{}".'.format(loutputs[i][0], j, mod_name), file=sys.stderr)
                    return False

        # Every driven signal must be a declared output or wire.
        for outp in lusedo:
            if not( outp in defined_output + defined_wire):
                print('Error: "{}[{}]" is not defined but used as an input in module "{}".'.format(outp[0], outp[1], mod_name), file=sys.stderr)
                return False


        # Expand input declarations per bit; unused inputs only warrant a warning.
        defined_input = []
        for i in range(0, len(linputs)):
            start = eval_expr(dVar, linputs[i][1])
            end   = eval_expr(dVar, linputs[i][2])
            for j in range( min(start, end), max(start, end)+1 ):
                defined_input.append( (linputs[i][0], j) )
                if not( (linputs[i][0], j) in lusedi ):
                    print('Warning: "{}[{}]" is defined but not used in module "{}".'.format(linputs[i][0], j, mod_name), file=sys.stderr)

        # Every read signal must be a declared input or wire.
        for inp in lusedi:
            if not( inp in defined_input + defined_wire ):
                print('Error: "{}[{}]" is not defined but used as an input in module "{}".'.format(inp[0], inp[1], mod_name), file=sys.stderr)
                return False


        if result != True:
            print('Error: occured in module "{}"'.format(mod_name))
            return False

        return True

    if debug :
        # Dump the parsed circuit definition when debugging.
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint( circuit_def )


    # Build, for every user-defined module, the bit-widths of its ports:
    # dModuleIn/dModuleOut map module name -> list of per-port widths.
    dModuleIn  = {}
    dModuleOut = {}
    for mod in circuit_def:
        mod_name = mod[0]
        loutputs = mod[1]
        linputs  = mod[2]

        # Skip anonymous (empty-named) module entries.
        if mod_name == '':
            continue

        linputWidth = []
        loutputWidth = []

        # Each port is (name, start_bit, end_bit); width = |start - end| + 1.
        for i in range(0, len(linputs)):
            start = linputs[i][1]
            end   = linputs[i][2]
            linputWidth.append( abs(start-end) + 1 )

        for i in range(0, len(loutputs)):
            start = loutputs[i][1]
            end   = loutputs[i][2]
            loutputWidth.append( abs(start-end) + 1 )

        dModuleIn[ mod_name  ] = linputWidth
        dModuleOut[ mod_name ] = loutputWidth

    # Register the built-in primitive gates with fixed single-bit ports.
    # NOTE(review): the cell names (NDRO, ND, CB, RTFFB, ...) look like an
    # RSFQ/superconducting cell library — confirm against the gate definitions.
    dModuleIn ['NDRO'] = dModuleIn ['ND'] = [1,1,1]
    dModuleOut['NDRO'] = dModuleOut['ND'] = [1]

    dModuleIn ['OR'] = dModuleIn ['XOR'] = dModuleIn ['AND'] = dModuleIn ['RDFF'] = [1,1,1]
    dModuleOut['OR'] = dModuleOut['XOR'] = dModuleOut['AND'] = dModuleOut['RDFF'] = [1]

    dModuleIn ['CB'] = dModuleIn ['D'] = dModuleIn ['NOT'] = [1,1]
    dModuleOut['CB'] = dModuleOut['D'] = dModuleOut['NOT'] = [1]

    dModuleIn ['RTFFB'] = dModuleIn ['T1'] = [1,1]
    dModuleOut['RTFFB'] = dModuleOut['T1'] = [1,1]

    # Validate every module; fail fast on the first error.
    for mod in circuit_def:
        result = check_module( dModuleIn, dModuleOut, mod)
        if result != True:
            return False
    
    return True
import os
import sys
import re

import pprint
pp = pprint.PrettyPrinter(indent=4)


def unique(seq):
    """Return the elements of *seq* with duplicates dropped, keeping first-seen order.

    Uses list membership (not a set) so unhashable elements are supported.
    """
    deduped = []
    for item in seq:
        if item in deduped:
            continue
        deduped.append(item)
    return deduped


# Build pharmgkb_mapping (gene symbol -> [accession id]) from the
# tab-separated attribute dump; only rows whose first column is "Gene" count.
with open("pharmgkb_attr.txt") as attr_file:
    lines = attr_file.readlines()

pharmgkb_mapping = {}

for row in lines:
    fields = row.rstrip().split("\t")
    if fields[0] != "Gene":
        continue
    pharmgkb_mapping[fields[1]] = [fields[2]]
    # DEBUG
    # print fields[1] + "\t" + fields[2]

# DEBUG
# pp.pprint(pharmgkb_mapping.keys())
class ControllerFromTrainruns():
    """Takes train runs, derives the actions from it and re-acts them."""

    # Shared pretty-printer used by assert_actions_plans_equal for failure messages.
    pp = pprint.PrettyPrinter(indent=4)

    def __init__(self,
                 env: RailEnv,
                 trainrun_dict: Dict[int, Trainrun]):

        self.env: RailEnv = env
        self.trainrun_dict: Dict[int, Trainrun] = trainrun_dict
        # NOTE(review): action_plan is a list but is indexed by agent_id in
        # get_action_at_step, so trainrun_dict is assumed to iterate agent ids
        # 0..n-1 in order — confirm with callers.
        self.action_plan: ActionPlanDict = [self._create_action_plan_for_agent(agent_id, chosen_path)
                                            for agent_id, chosen_path in trainrun_dict.items()]

    def get_waypoint_before_or_at_step(self, agent_id: int, step: int) -> Waypoint:
        """
        Get the waypoint from which the current position can be extracted.

        Parameters
        ----------
        agent_id
        step

        Returns
        -------
        WalkingElement

        """
        trainrun = self.trainrun_dict[agent_id]
        entry_time_step = trainrun[0].scheduled_at
        # the agent has no position before and at choosing to enter the grid (one tick elapses before the agent enters the grid)
        if step <= entry_time_step:
            return Waypoint(position=None, direction=self.env.agents[agent_id].initial_direction)

        # the agent has no position as soon as the target is reached
        exit_time_step = trainrun[-1].scheduled_at
        if step >= exit_time_step:
            # agent loses position as soon as target cell is reached
            return Waypoint(position=None, direction=trainrun[-1].waypoint.direction)

        # Otherwise return the last waypoint whose scheduled time is <= step.
        waypoint = None
        for trainrun_waypoint in trainrun:
            if step < trainrun_waypoint.scheduled_at:
                return waypoint
            if step >= trainrun_waypoint.scheduled_at:
                waypoint = trainrun_waypoint.waypoint
        assert waypoint is not None
        return waypoint

    def get_action_at_step(self, agent_id: int, current_step: int) -> Optional[RailEnvActions]:
        """
        Get the current action if any is defined in the `ActionPlan`.
        ASSUMPTION we assume the env has `remove_agents_at_target=True` and `activate_agents=False`!!

        Parameters
        ----------
        agent_id
        current_step

        Returns
        -------
        WalkingElement, optional

        """
        # Plan elements are scanned in order; an action is returned only when
        # one is scheduled exactly at current_step.
        for action_plan_element in self.action_plan[agent_id]:
            scheduled_at = action_plan_element.scheduled_at
            if scheduled_at > current_step:
                return None
            elif current_step == scheduled_at:
                return action_plan_element.action
        return None

    def act(self, current_step: int) -> Dict[int, RailEnvActions]:
        """
        Get the action dictionary to be replayed at the current step.
        Returns only action where required (no action for done agents or those not at the beginning of the cell).

        ASSUMPTION we assume the env has `remove_agents_at_target=True` and `activate_agents=False`!!

        Parameters
        ----------
        current_step: int

        Returns
        -------
        Dict[int, RailEnvActions]

        """
        action_dict = {}
        for agent_id in range(len(self.env.agents)):
            action: Optional[RailEnvActions] = self.get_action_at_step(agent_id, current_step)
            if action is not None:
                action_dict[agent_id] = action
        return action_dict

    def print_action_plan(self):
        """Pretty-prints `ActionPlanDict` of this `ControllerFromTrainruns`  to stdout."""
        self.__class__.print_action_plan_dict(self.action_plan)

    @staticmethod
    def print_action_plan_dict(action_plan: ActionPlanDict):
        """Pretty-prints `ActionPlanDict` to stdout."""
        for agent_id, plan in enumerate(action_plan):
            print("{}: ".format(agent_id))
            for step in plan:
                print("  {}".format(step))

    @staticmethod
    def assert_actions_plans_equal(expected_action_plan: ActionPlanDict, actual_action_plan: ActionPlanDict):
        """Assert element-wise equality of two action plans with readable diffs on failure."""
        assert len(expected_action_plan) == len(actual_action_plan)
        for k in range(len(expected_action_plan)):
            assert len(expected_action_plan[k]) == len(actual_action_plan[k]), \
                "len for agent {} should be the same.\n\n  expected ({}) = {}\n\n  actual ({}) = {}".format(
                    k,
                    len(expected_action_plan[k]),
                    ControllerFromTrainruns.pp.pformat(expected_action_plan[k]),
                    len(actual_action_plan[k]),
                    ControllerFromTrainruns.pp.pformat(actual_action_plan[k]))
            for i in range(len(expected_action_plan[k])):
                assert expected_action_plan[k][i] == actual_action_plan[k][i], \
                    "not the same at agent {} at step {}\n\n  expected = {}\n\n  actual = {}".format(
                        k, i,
                        ControllerFromTrainruns.pp.pformat(expected_action_plan[k][i]),
                        ControllerFromTrainruns.pp.pformat(actual_action_plan[k][i]))
        assert expected_action_plan == actual_action_plan, \
            "expected {}, found {}".format(expected_action_plan, actual_action_plan)

    def _create_action_plan_for_agent(self, agent_id, trainrun) -> ActionPlan:
        """Derive the `ActionPlan` for one agent from its train run."""
        action_plan = []
        agent = self.env.agents[agent_id]
        # Number of ticks needed to traverse one cell at this agent's speed.
        minimum_cell_time = int(np.ceil(1.0 / agent.speed_data['speed']))
        for path_loop, trainrun_waypoint in enumerate(trainrun):
            trainrun_waypoint: TrainrunWaypoint = trainrun_waypoint

            position = trainrun_waypoint.waypoint.position

            # Stop once the target cell is reached; this also prevents the
            # path_loop + 1 lookahead below from running past the trainrun.
            if Vec2d.is_equal(agent.target, position):
                break

            next_trainrun_waypoint: TrainrunWaypoint = trainrun[path_loop + 1]
            next_position = next_trainrun_waypoint.waypoint.position

            # The first path element needs special handling (grid entry).
            if path_loop == 0:
                self._add_action_plan_elements_for_first_path_element_of_agent(
                    action_plan,
                    trainrun_waypoint,
                    next_trainrun_waypoint,
                    minimum_cell_time
                )
                continue

            just_before_target = Vec2d.is_equal(agent.target, next_position)

            self._add_action_plan_elements_for_current_path_element(
                action_plan,
                minimum_cell_time,
                trainrun_waypoint,
                next_trainrun_waypoint)

            # add a final element
            if just_before_target:
                self._add_action_plan_elements_for_target_at_path_element_just_before_target(
                    action_plan,
                    minimum_cell_time,
                    trainrun_waypoint,
                    next_trainrun_waypoint)
        return action_plan

    def _add_action_plan_elements_for_current_path_element(self,
                                                           action_plan: ActionPlan,
                                                           minimum_cell_time: int,
                                                           trainrun_waypoint: TrainrunWaypoint,
                                                           next_trainrun_waypoint: TrainrunWaypoint):
        """Append the action(s) for one intermediate path element to action_plan."""
        scheduled_at = trainrun_waypoint.scheduled_at
        next_entry_value = next_trainrun_waypoint.scheduled_at

        position = trainrun_waypoint.waypoint.position
        direction = trainrun_waypoint.waypoint.direction
        next_position = next_trainrun_waypoint.waypoint.position
        next_direction = next_trainrun_waypoint.waypoint.direction
        next_action = get_action_for_move(position,
                                          direction,
                                          next_position,
                                          next_direction,
                                          self.env.rail)

        # if the next entry is later than minimum_cell_time, then stop here and
        # move minimum_cell_time before the exit
        # we have to do this since agents in the RailEnv are processed in the step() in the order of their handle
        if next_entry_value > scheduled_at + minimum_cell_time:
            action = ActionPlanElement(scheduled_at, RailEnvActions.STOP_MOVING)
            action_plan.append(action)

            action = ActionPlanElement(next_entry_value - minimum_cell_time, next_action)
            action_plan.append(action)
        else:
            action = ActionPlanElement(scheduled_at, next_action)
            action_plan.append(action)

    def _add_action_plan_elements_for_target_at_path_element_just_before_target(self,
                                                                                action_plan: ActionPlan,
                                                                                minimum_cell_time: int,
                                                                                trainrun_waypoint: TrainrunWaypoint,
                                                                                next_trainrun_waypoint: TrainrunWaypoint):
        """Append the final STOP_MOVING once the cell just before the target is traversed."""
        scheduled_at = trainrun_waypoint.scheduled_at

        action = ActionPlanElement(scheduled_at + minimum_cell_time, RailEnvActions.STOP_MOVING)
        action_plan.append(action)

    def _add_action_plan_elements_for_first_path_element_of_agent(self,
                                                                  action_plan: ActionPlan,
                                                                  trainrun_waypoint: TrainrunWaypoint,
                                                                  next_trainrun_waypoint: TrainrunWaypoint,
                                                                  minimum_cell_time: int):
        """Append the grid-entry actions for the agent's first path element."""
        scheduled_at = trainrun_waypoint.scheduled_at
        position = trainrun_waypoint.waypoint.position
        direction = trainrun_waypoint.waypoint.direction
        next_position = next_trainrun_waypoint.waypoint.position
        next_direction = next_trainrun_waypoint.waypoint.direction

        # add intial do nothing if we do not enter immediately, actually not necessary
        if scheduled_at > 0:
            action = ActionPlanElement(0, RailEnvActions.DO_NOTHING)
            action_plan.append(action)

        # add action to enter the grid
        action = ActionPlanElement(scheduled_at, RailEnvActions.MOVE_FORWARD)
        action_plan.append(action)

        next_action = get_action_for_move(position,
                                          direction,
                                          next_position,
                                          next_direction,
                                          self.env.rail)

        # if the agent is blocked in the cell, we have to call stop upon entering!
        if next_trainrun_waypoint.scheduled_at > scheduled_at + 1 + minimum_cell_time:
            action = ActionPlanElement(scheduled_at + 1, RailEnvActions.STOP_MOVING)
            action_plan.append(action)

        # execute the action exactly minimum_cell_time before the entry into the next cell
        action = ActionPlanElement(next_trainrun_waypoint.scheduled_at - minimum_cell_time, next_action)
        action_plan.append(action)
示例#29
0
    def get_pronostico(self, t_seg_epoch):
        #
        # Return the forecast value for one instant (t_seg_epoch, seconds since
        # the epoch).  The forecast source depends on self.prono_type:
        #   'E' external program, 'F' formula, 'Q' stored forecast rows.
        # Returns -1 when no forecast could be produced.
        #
        prono_val = -1

        # 'E': forecast produced by an external command invoked with
        # t_seg_epoch as its argument.
        if ( self.prono_type == 'E'):
            try:
                externo = "python " + self.external_prono + " " + str(t_seg_epoch)
                prono_val = exec_external(externo)
                # NOTE(review): this assumes exec_external returns a sequence
                # (presumably the command's stdout as a string) — confirm;
                # output shorter than 2 characters is treated as a failure.
                if ( len(prono_val) < 2 ):
                   prono_val = -1

                t = t_seg_epoch
                print ("*** usando modo External : [", self.external_prono," ] = ", prono_val)
                print ("*** valor estimado de [",self.varname,"] ", " tenant: [", self.tenant,"]")
                print ("*** en t[seg epoch]: ", t, " | hh:mm :", util.get_utc_hora_min(t) )
            except Exception as e:
                print(e)
                prono_val = -1
            return float(prono_val)
  
        # 'F': forecast computed by eval()-ing a stored formula; the local
        # variable t (seconds since epoch) is in scope for the formula.
        # SECURITY: eval() executes arbitrary code — self.formula must come
        # from a trusted source.
        if ( self.prono_type == 'F'):
            t = t_seg_epoch
            try:
                prono_val = eval( self.formula )
                print ("*** usando modo Formula : [", self.formula," ] = ", prono_val)
                print ("*** valor estimado de [",self.varname,"] ", " tenant: [", self.tenant,"]")
                print ("*** en t[seg epoch]: ", t, " | hh:mm:", util.get_utc_hora_min(t) )
            except Exception as e:
                print(e)
                print("Error:\nNo se pudo usar la formula la variable ["+self.varname+"] tenant ["+self.tenant+"]")
                prono_val = -1
            return prono_val

        # 'Q': forecast looked up from stored forecast rows whose
        # [ts_ini, ts_end) interval contains t_seg_epoch.
        if ( self.prono_type == 'Q'):
            # Earlier HHMM-lapse-based query, kept for reference:
            # inicio_i = int(utc_hhmm[:2]) * self.lapso    
            # inicio   = str( inicio_i )                 # HHMM de Inicio del Lapso
            # fin      = str( inicio_i + self.lapso )    # HHMM de Fin del Lapso
            # self.query_pronostico = ('{'
            #                             '"query": {'
            #                             '    "bool": {'
            #                             '    "must": ['
            #                             '        { "term": {"tenant": "'  + self.tenant + '"}}, '
            #                             '        { "term": {"variable": "'+ self.varname + '"}},'
            #                             '        { "range": { "ts_ini": { "gte": "'+inicio+'", "lte": "'+fin+'" } } }'
            #                             '    ]'
            #                             '    }'
            #                             '}}'
            #         )

            t_seg_epoch_str = str(t_seg_epoch)
            self.query_pronostico = ('{ "query": { "bool":  '
                                        '       { "must": [  '
                                        '	{ "match": { "tenant": "'  + self.tenant + '" } }, '
                                        '	{ "match": { "varname": "' + self.varname + '" } }, '
                                        '	{ "range": { "ts_ini": { "lt": "'+t_seg_epoch_str+'" } } }, '
                                        '	{ "range": { "ts_end": { "gt": "'+t_seg_epoch_str+'" } } } '
                                        '		] '
                                        '} } }'
                                     )
            if DEBUG: print ("buscando pronostico con la consulta:", self.query_pronostico, " ...")
            # NOTE(review): self.es is presumably an Elasticsearch client —
            # confirm; the result is also cached on self.pronostico.
            self.pronostico = self.es.search(index='var_prono', doc_type='prono', body=self.query_pronostico )             
            if DEBUG:
                print("\nPronostico para el rango de tiempo de ese instante:")
                pp = pprint.PrettyPrinter(indent=4)
                pp.pprint( self.pronostico )
                print("\n*** Total filas recuperadas:", self.pronostico['hits']['total'] )
            
            # Take the first matching row's estimated value, or -1 if none.
            self.total_retrieved = self.pronostico['hits']['total']
            if ( self.total_retrieved > 0 ):
                prono_val = self.pronostico['hits']['hits'][0]['_source']['estimated_value']
            else:
                utc_time = util.get_utc_hora_min(t_seg_epoch)
                print("*** No se encontraron pronosticos para:", self.tenant, self.varname, "en epoch:",t_seg_epoch," hora:", utc_time)
                prono_val = -1

        return  prono_val
示例#30
0
        print
        print "Generate a plain-text intermediate form for a LongBow test case generated from"
        print "a specified source and object file.  Use longbow-code to produce a LongBow"
        print "test runner based upon the intermediate form."
        sys.exit(1)

    fileNames = computeFileNames(sys.argv)

    sourceFileName = fileNames[0]
    objectFileName = fileNames[1]
    outputFileName = fileNames[2]

    functionDictionary = getDarwinTestableFunctions(sourceFileName,
                                                    objectFileName)

    testRunnerName = sourceFileNameToName(sourceFileName)

    testFixtures = map(
        lambda (fixtureType): testFixture(
            fixtureType, testSuite(testCases(functionDictionary[fixtureType]))
        ), functionDictionary)

    files = {"sourceFile": sourceFileName, "objectFile": objectFileName}
    result = testRunner(testRunnerName, files, testFixtures)

    out = open(outputFileName, "w")
    pp = pprint.PrettyPrinter(indent=4, width=132, depth=None, stream=out)
    pp.pprint(result)
    out.close()
    pass