Example #1
def __main__():
	context = Context()
	trade()
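Example #1 is only a fragment: `Context` and `trade` are undefined and `__main__` is never called. A minimal hedged sketch of how such an entry point is usually wired; every name below is a placeholder rather than something from the original project.

class Context:
    """Placeholder for whatever state the original example's Context holds."""
    def __init__(self):
        self.settings = {}


def trade():
    # Placeholder for the trading routine the fragment calls.
    print("trading...")


def main():
    context = Context()  # kept to mirror the fragment; a real program would pass it to trade()
    trade()


if __name__ == '__main__':
    main()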
Example #2
        return "I"

    def four(self):
        return "IV"

    def five(self):
        return "V"

    def nine(self):
        return "IX"

    def multiplier(self):
        return 1


if __name__ == '__main__':
    a = Context('MCMXXVIII')
    b = Expression()
    print(a.input)

    tree = []
    tree.append(ThousandExpression())
    tree.append(HundredExpression())
    tree.append(TenExpression())
    tree.append(OneExpression())

    for it in tree:
        it.interpret(a)

    print(a.output)
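The snippet in Example #2 omits the classes it relies on. Below is a hedged sketch of the `Context` holder and the `Expression` base class assumed by this Roman-numeral Interpreter pattern; the bodies follow the classic textbook version of the pattern, not the original file.

class Context:
    # Holds the remaining Roman-numeral input and the accumulated integer output.
    def __init__(self, input):
        self.input = input
        self.output = 0


class Expression:
    # Template method: subclasses (OneExpression, TenExpression, ...) supply the
    # one/four/five/nine symbols and a multiplier. Subclasses that have no symbol
    # for a slot are assumed to return a placeholder such as ' ' that never matches.
    def interpret(self, context):
        if len(context.input) == 0:
            return
        if context.input.startswith(self.nine()):
            context.output += 9 * self.multiplier()
            context.input = context.input[2:]
        elif context.input.startswith(self.four()):
            context.output += 4 * self.multiplier()
            context.input = context.input[2:]
        elif context.input.startswith(self.five()):
            context.output += 5 * self.multiplier()
            context.input = context.input[1:]
        while context.input.startswith(self.one()):
            context.output += 1 * self.multiplier()
            context.input = context.input[1:]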
Example #3
File: gui.py Project: Aristo7/GridMateLab2
def __execute(command, args):

    requestId = args['request_id'] if 'request_id' in args else 0

    print 'command started - {} with args {}'.format(command, args)

    metricsInterface = MetricsContext('gui')
    metricsInterface.set_command_name(command)
    metricsInterface.submit_attempt()

    try:
        argsObj = Args(**args)

        argsObj.no_prompt = True
        argsObj.is_gui = True

        context = Context(metricsInterface, view_class=GuiViewContext)
        context.bootstrap(argsObj)
        context.initialize(argsObj)

        # Deprecated in 1.9. TODO: remove.
        context.hooks.call_module_handlers(
            'cli-plugin-code/resource_commands.py',
            'add_gui_commands',
            args=[command_handlers, argsObj],
            deprecated=True)
        context.hooks.call_module_handlers(
            'cli-plugin-code/resource_commands.py',
            'add_gui_view_commands',
            args=[context.view],
            deprecated=True)

        context.hooks.call_module_handlers(
            'resource-manager-code/command.py',
            'add_gui_commands',
            kwargs={'handlers': command_handlers})

        context.hooks.call_module_handlers(
            'resource-manager-code/command.py',
            'add_gui_view_commands',
            kwargs={'view_context': context.view})

        handler = command_handlers.get(command, None)
        if handler is None:
            raise HandledError('Unknown command: ' + command)

        if handler != project.update_framework_version:
            context.config.verify_framework_version()

        handler(context, argsObj)

        context.view.success()
        metricsInterface.submit_success()

    except HandledError as e:
        metricsInterface.submit_failure()
        msg = str(e)
        print 'command error   - {} when executing command {} with args {}.'.format(
            msg, command, args)
        args['view_output_function'](requestId, 'error', msg)
    except NoCredentialsError:
        metricsInterface.submit_failure()
        msg = 'No AWS credentials were provided.'
        print 'command error   - {} when executing command {} with args {}.'.format(
            msg, command, args)
        args['view_output_function'](requestId, 'error', msg)
    except (EndpointConnectionError, IncompleteReadError, ConnectionError,
            UnknownEndpointError) as e:
        metricsInterface.submit_failure()
        msg = 'We were unable to contact your AWS endpoint.\nERROR: {0}'.format(
            e.message)
        print 'command error   - {} when executing command {} with args {}.'.format(
            msg, command, args)
        args['view_output_function'](requestId, 'error', msg)
    except:
        metricsInterface.submit_failure()
        info = sys.exc_info()
        msg = traceback.format_exception(*info)
        print 'command error   - {} when executing command {} with args {}.'.format(
            msg, command, args)
        args['view_output_function'](requestId, 'error', msg)

    print 'command finished - {} with args {}'.format(command, args)
Example #4
import sys
import os
# from multiprocessing import Pool

from context import Context
from export import Exporter

# program file dir

#  mill line tag as string
line = sys.argv[1]
#  dir definition: the %cd% passed in is the root dir
root_dir = sys.argv[2].replace("\\", "/")
print(root_dir)
# context init
ctx = Context(root_dir)
# exporter
exporter = Exporter(ctx)

exporter.pond_export()

# if __name__ == '__main__':
# print('Parent process {}.'.format(os.getpid()))
# p = Pool(4)

# p.apply_async(exporter.pond_export, args=("pond",))
# print('Waiting for all subprocesses done...')

# p.close()
# p.join()
# print('All subprocesses done.')
Example #5
from context import Context
from app_exceptions import *
from filters import is_instant_time_rules
from flask import Flask, request, Response, abort, send_from_directory, jsonify
from flask_arango import Arango
from flask_swagger import swagger
import instant
import os
import timeframe


app = Flask(__name__)
app.config.from_object('config.default_settings')
app.config.from_pyfile('eris.cfg', silent=True)
arango = Arango(app)
context = Context(app=app, arango=arango)


@app.route('/ping')
def ping():
    """
Send a ping query
---
tags:
  - ping
parameters:
  - in: query
    name: time
    description: timestamp
    required: false
    type: integer
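The `/ping` handler in Example #5 is cut off inside its Swagger docstring. A hedged sketch of how it might continue; the `responses` block and the JSON payload are assumptions, not taken from the project.

responses:
  200:
    description: pong
    """
    # Assumed body: echo the optional timestamp back as JSON.
    time = request.args.get('time', type=int)
    return jsonify({'ping': 'pong', 'time': time})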
Example #6
File: tests.py Project: abjose/surfsim2
      (maybe) must force things to only be 'dependent' on things higher in
      their own hierarchy (so don't initialize based on some random other 
      thing's position or something), and should initialize variables before
      referencing them...and don't connect things before initializing...
TODO: If going to have many things that take a (list of) input vectors but need
      to operate on only a single output vector...better way of doing?
TODO: Have warnings when variables are made without being prepended by $ or 
      other?
TODO: Why is nothing shown for initialization during copies?
TODO: Could....add a second init step so there's one before connections
      and one after?
TODO: want to change copy_node so that it takes constraints?
"""

# create context
s = Context()

# add stimulus sizes to root node...would be nicer if they went in stimulus node
s.add_rule('init', '$kernel_length = 10', '$output_length = 50',
           '$bcm_radius = 4.', '$stim_size  = 20', '$max_delay  = 3')
# add all grids here? just name differently...
# NOTE: these are one longer than you think - fix?

# add a container for stimulus and 'focus' on it
s.add_node('$name = "stimulus"')
s.set_focus('$name == "stimulus"')

# add a distribution rule for stimulus points
s.add_rule(
    'init',
    # dx and dy were initially 2 for both of these, changing temporarily
Example #7
def image_builder(buildspec):

    BUILDSPEC = Buildspec()
    BUILDSPEC.load(buildspec)
    PRE_PUSH_STAGE_IMAGES = []
    COMMON_STAGE_IMAGES = []

    if "huggingface" in str(BUILDSPEC["framework"]) or "autogluon" in str(
            BUILDSPEC["framework"]):
        os.system("echo login into public ECR")
        os.system(
            "aws ecr get-login-password --region us-west-2 | docker login --username AWS --password-stdin 763104351884.dkr.ecr.us-west-2.amazonaws.com"
        )

    for image_name, image_config in BUILDSPEC["images"].items():
        ARTIFACTS = deepcopy(
            BUILDSPEC["context"]) if BUILDSPEC.get("context") else {}

        extra_build_args = {}
        labels = {}
        enable_datetime_tag = parse_dlc_developer_configs(
            "build", "datetime_tag")

        if image_config.get("version") is not None:
            if BUILDSPEC["version"] != image_config.get("version"):
                continue

        if image_config.get("context") is not None:
            ARTIFACTS.update(image_config["context"])

        image_tag = (tag_image_with_pr_number(image_config["tag"])
                     if build_context == "PR" else image_config["tag"])
        if enable_datetime_tag or build_context != "PR":
            image_tag = tag_image_with_datetime(image_tag)
        image_repo_uri = (image_config["repository"] if build_context == "PR"
                          else modify_repository_name_for_context(
                              str(image_config["repository"]), build_context))
        base_image_uri = None
        if image_config.get("base_image_name") is not None:
            base_image_object = _find_image_object(
                PRE_PUSH_STAGE_IMAGES, image_config["base_image_name"])
            base_image_uri = base_image_object.ecr_url

        if image_config.get("download_artifacts") is not None:
            for artifact_name, artifact in image_config.get(
                    "download_artifacts").items():
                type = artifact["type"]
                uri = artifact["URI"]
                var = artifact["VAR_IN_DOCKERFILE"]

                try:
                    file_name = utils.download_file(uri, type).strip()
                except ValueError:
                    FORMATTER.print(
                        f"Artifact download failed: {uri} of type {type}.")

                ARTIFACTS.update({
                    f"{artifact_name}": {
                        "source":
                        f"{os.path.join(os.sep, os.path.abspath(os.getcwd()), file_name)}",
                        "target": file_name
                    }
                })

                extra_build_args[var] = file_name
                labels[var] = file_name
                labels[f"{var}_URI"] = uri

        if str(BUILDSPEC["framework"]).startswith("huggingface"):
            if "transformers_version" in image_config:
                extra_build_args["TRANSFORMERS_VERSION"] = image_config.get(
                    "transformers_version")
            else:
                raise KeyError(
                    f"HuggingFace buildspec.yml must contain 'transformers_version' field for each image"
                )
            if "datasets_version" in image_config:
                extra_build_args["DATASETS_VERSION"] = image_config.get(
                    "datasets_version")
            elif str(image_config["image_type"]) == "training":
                raise KeyError(
                    f"HuggingFace buildspec.yml must contain 'datasets_version' field for each image"
                )

        ARTIFACTS.update({
            "dockerfile": {
                "source": image_config["docker_file"],
                "target": "Dockerfile",
            }
        })

        context = Context(ARTIFACTS, f"build/{image_name}.tar.gz",
                          image_config["root"])

        if "labels" in image_config:
            labels.update(image_config.get("labels"))
        """
        Override parameters from parent in child.
        """

        info = {
            "account_id":
            str(BUILDSPEC["account_id"]),
            "region":
            str(BUILDSPEC["region"]),
            "framework":
            str(BUILDSPEC["framework"]),
            "version":
            str(BUILDSPEC["version"]),
            "root":
            str(image_config["root"]),
            "name":
            str(image_name),
            "device_type":
            str(image_config["device_type"]),
            "python_version":
            str(image_config["python_version"]),
            "image_type":
            str(image_config["image_type"]),
            "image_size_baseline":
            int(image_config["image_size_baseline"]),
            "base_image_uri":
            base_image_uri,
            "enable_test_promotion":
            image_config.get("enable_test_promotion", True),
            "labels":
            labels,
            "extra_build_args":
            extra_build_args
        }

        # Create pre_push stage docker object
        pre_push_stage_image_object = DockerImage(
            info=info,
            dockerfile=image_config["docker_file"],
            repository=image_repo_uri,
            tag=append_tag(image_tag, "pre-push"),
            to_build=image_config["build"],
            stage=constants.PRE_PUSH_STAGE,
            context=context,
            additional_tags=[image_tag],
        )

        ##### Create Common stage docker object #####
        # If for a pre_push stage image we create a common stage image, then we do not push the pre_push stage image
        # to the repository. Instead, we just push its common stage image to the repository. Therefore,
        # inside function get_common_stage_image_object we make pre_push_stage_image_object non pushable.
        # common_stage_image_object = generate_common_stage_image_object(pre_push_stage_image_object, image_tag)
        # COMMON_STAGE_IMAGES.append(common_stage_image_object)

        PRE_PUSH_STAGE_IMAGES.append(pre_push_stage_image_object)
        FORMATTER.separator()

    FORMATTER.banner("DLC")

    # Parent images do not inherit from any containers built in this job
    # Child images use one of the parent images as their base image
    parent_images = [
        image for image in PRE_PUSH_STAGE_IMAGES if not image.is_child_image
    ]
    child_images = [
        image for image in PRE_PUSH_STAGE_IMAGES if image.is_child_image
    ]
    ALL_IMAGES = PRE_PUSH_STAGE_IMAGES + COMMON_STAGE_IMAGES
    IMAGES_TO_PUSH = [
        image for image in ALL_IMAGES if image.to_push and image.to_build
    ]

    pushed_images = []
    pushed_images += process_images(parent_images, "Parent/Independent")
    pushed_images += process_images(child_images, "Child/Dependent")

    assert all(image in pushed_images
               for image in IMAGES_TO_PUSH), "Some images could not be pushed."

    # After the build, display logs/summary for all the images.
    FORMATTER.banner("Summary")
    show_build_info(ALL_IMAGES)

    FORMATTER.banner("Errors")
    is_any_build_failed, is_any_build_failed_size_limit = show_build_errors(
        ALL_IMAGES)

    # From all images, filter the images that were supposed to be built and upload their metrics
    BUILT_IMAGES = [image for image in ALL_IMAGES if image.to_build]

    FORMATTER.banner("Upload Metrics")
    upload_metrics(BUILT_IMAGES, BUILDSPEC, is_any_build_failed,
                   is_any_build_failed_size_limit)

    FORMATTER.banner("Test Env")
    # Set environment variables to be consumed by test jobs
    test_trigger_job = utils.get_codebuild_project_name()
    # Tests should only run on images that were pushed to the repository
    utils.set_test_env(IMAGES_TO_PUSH,
                       use_latest_additional_tag=True,
                       BUILD_CONTEXT=os.getenv("BUILD_CONTEXT"),
                       TEST_TRIGGER=test_trigger_job)
Example #8
#!/usr/bin/env python
import json
import logging
import os
from multiprocessing import Process, Queue
import requests
from requests_toolbelt import MultipartEncoder
import sys
import time
import yaml
from bottle import route, run, request, abort

# the thread-safe store that is shared across components
#
from context import Context
context = Context()
context.set('plumby.version', '0.2 alpha')

# the queue of updates to be sent to Cisco Spark, processed by a Sender
#
mouth = Queue()

# the queue of reports from a Worker, processed by a Speaker
#
outbox = Queue()

# the queue of activities for a Worker, fed by a Listener and Shell
#
inbox = Queue()

# the streams of information coming from Cisco Spark, handled by a Listener
Example #9
 def get_sp(self):
     return Context().arch.get_sp()
Example #10
File: pyqx.py Project: hansroh/pyqx
def readCSS(fname):
	# Read a stylesheet file and return its contents as a string.
	with open(fname, 'r') as f:
		return f.read()


if __name__ == '__main__':

	app = QtWidgets.QApplication(sys.argv)
	#app.setWindowIcon(QtWidgets.QIcon("images/icon.png"))

	sys.setrecursionlimit(4096*4096)

	customFnt = "Lato-Reg.ttf"
	if QtGui.QFontDatabase().addApplicationFont(os.path.join("fonts", customFnt)) < 0:
		print("Warning: Could not load custom font" + customFnt + ", falling back to default font.")
	else:
		fnt = QtGui.QFont("Lato", 10)
		app.setFont(fnt)

	signals = Signals()
	context = Context(signals)

	mw = MainWindow(context, signals)
	mw.setStyleSheet(readCSS(os.path.join("themes", "algae", "style.css")))

	sys.exit(app.exec_())
Example #11
    except Exception as e:
        print("Error: Parameters not found in tv_entry", e)
        exit(1)

    v_thing_ID = thing_visor_ID + "/" + v_thing_name
    v_thing = {
        "label": v_thing_label,
        "id": v_thing_ID,
        "description": v_thing_description
    }

    sub_rn = v_thing_ID.replace("/", ":") + "_subF4I"
    vtype = ""

    # Context is a "map" of current virtual thing state
    context_vThing = Context()
    # mapping of virtual thing with its context object. Useful in case of multiple virtual things
    contexts = {v_thing_ID: context_vThing}

    port_mapping = db[thing_visor_collection].find_one(
        {"thingVisorID": thing_visor_ID}, {
            "port": 1,
            "_id": 0
        })
    poa_IP_dict = db[thing_visor_collection].find_one(
        {"thingVisorID": thing_visor_ID}, {
            "IP": 1,
            "_id": 0
        })
    poa_IP = str(poa_IP_dict['IP'])
    poa_port = port_mapping['port'][str(flask_port) + '/tcp']
Example #12
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "file",
        help=
        "File(s) or folder(s) you wanna run the parser on. If no file provided, runs on current folder.",
        default=[],
        action='append',
        nargs='*')
    parser.add_argument("-d",
                        "--debug",
                        action="count",
                        help="Debug output (multiple values available)",
                        default=0)
    parser.add_argument('-v',
                        '--version',
                        action='version',
                        version='norminette ' + str(__version__))
    #parser.add_argument('-s', '--sentry', action='store_true', default=False)
    parser.add_argument(
        '--cfile',
        action='store',
        help="Store C file content directly instead of filename")
    parser.add_argument(
        '--hfile',
        action='store',
        help="Store header file content directly instead of filename")
    args = parser.parse_args()
    registry = Registry()
    targets = []
    has_err = None
    content = None

    debug = args.debug
    #if args.sentry == True:
    #sentry_sdk.init("https://[email protected]/72")
    if args.cfile is not None or args.hfile is not None:
        targets = ['file.c'] if args.cfile else ['file.h']
        content = args.cfile if args.cfile else args.hfile
    else:
        args.file = args.file[0]
        if args.file == [[]] or args.file == []:
            targets = glob.glob("**/*.[ch]", recursive=True)
            targets.sort()
        else:
            for arg in args.file:
                if os.path.exists(arg) is False:
                    print(f"'{arg}' no such file or directory")
                elif os.path.isdir(arg):
                    if arg[-1] != '/':
                        arg = arg + '/'
                    targets.extend(glob.glob(arg + '**/*.[ch]',
                                             recursive=True))
                elif os.path.isfile(arg):
                    targets.append(arg)
    event = []
    for target in targets:
        if target[-2:] not in [".c", ".h"]:
            print(f"{arg} is not valid C or C header file")
        else:
            with configure_scope() as scope:
                scope.set_extra("File", target)
            try:
                event.append(Event())
                #if args.sentry == True:
                #    proc = Thread(target=timeout, args=(event[-1], 5, ))
                #    proc.daemon = True
                #    proc.start()
                if content is None:
                    with open(target) as f:
                        #print ("Running on", target)
                        source = f.read()
                else:
                    source = content
                lexer = Lexer(source)
                tokens = lexer.get_tokens()
                context = Context(target, tokens, debug)
                registry.run(context, source)
                event[-1].set()
                if context.errors:
                    has_err = True
            # except (TokenError, CParsingError) as e:
            except TokenError as e:
                has_err = True
                print(target + f": KO!\n\t{colors(e.msg, 'red')}")
                event[-1].set()
            except CParsingError as e:
                has_err = True
                print(target + f": KO!\n\t{colors(e.msg, 'red')}")
                event[-1].set()
            except KeyboardInterrupt as e:
                event[-1].set()
                sys.exit(1)
    sys.exit(1 if has_err else 0)
Example #13
import pygame

from player import Player
from context import Context
from constants import *
from worldmap import ShortestPath

numplayers = 2
testplayers = []

testplayers.append(('DiGiTALQU33F', 'test_guy_1'))
testplayers.append(('iRONVAGiNA', 'test_guy_2'))

context = Context(numplayers, testplayers)

print '\n#################### STARTING BASIC TEST ####################'

print '\n############ Locations ############\n'
for loc in context.world.locations:
    print 'Location: %s Players: %s' % (loc, context.world.locations[loc])

print '\n############# Players #############\n'

player_list = []

for player in context.players:
    player_list.append(player.name)

print 'Context contains players %s' % player_list

print '\n######## Testing Dijkstras ########\n'
Example #14
from context import Context

with Context('hiep') as c:
    print('Everything done!')
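Example #14 uses `Context` as a context manager, which requires the class to implement the `with` protocol. A hedged sketch of a minimal `Context` that would support this usage; the constructor argument and the setup/cleanup behaviour are assumptions.

class Context:
    # Minimal context manager compatible with: with Context('hiep') as c: ...
    def __init__(self, name):
        self.name = name

    def __enter__(self):
        # Acquire whatever resource the real Context wraps (assumed: none here).
        print('Entering context for', self.name)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Release the resource; returning False lets any exception propagate.
        print('Leaving context for', self.name)
        return False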
Example #15
    def build_query_context(self, last_context, entities, properties, literal_objects):
        """
        builds the new context structure (connects old entities with new ones, adds filters, determines predicates)
        returns the generated context
        """
        if last_context is None:
            # TODO: should also try to match whatever it can (something better than nothing).
            #           connect_subject_predicate_literal tries to be very sure and precise, so is not the best option here
            entities, properties, unassigned_literals = self.connect_subject_predicate_literal(entities, properties, literal_objects)
            return Context(all_entities=entities)

        # Cloned context contains a disassociate list, but the entities objects are still the same!
        # new_context = Context.clone(last_context)
        new_context = Context(all_entities=entities)

        # minimum confidence necessary to be accepted as valid predicate
        confidence_threshold = 0.3


        #new_context.all_entities += entities

        for new_entity in entities:
            # the max confidence of the highest rated (best_predicate, best_old_entity) tuple
            max_confidence = confidence_threshold

            # entity of the old context which relates the most with the new entity
            best_old_entity = None

            # predicate with which the best_old_entity relates to the new_entity
            best_predicate = None

            # True if relationship points from new_entity to old_entity
            should_reverse = False

            # if confidence is high enough (e.g. > 0.95) we don't look for more predicates so that we speed up the search
            sure_enough_confidence = 0.95


            # look for entities in the old context
            for old_entity in last_context.all_entities:
                # look for the best matching predicate. Gives the priority to predicates pointing from old to new instead of new->old
                # the subject and object must be inverted if the relationship is new pointing to old entity, instead of old pointing to new
                predicate, confidence, should_reverse = self.get_best_fitting_predicate(old_entity, new_entity)

                if confidence > max_confidence:
                    best_predicate = predicate
                    best_old_entity = old_entity
                    max_confidence = confidence

                # if we are sure enough we stop looking for connections between other old_entities
                if max_confidence > sure_enough_confidence:
                    break

            if best_predicate is None:
                print('no predicate found for ' + new_entity.get_type())
            else:
                # by default the old entity points at the new one
                if not should_reverse:
                    best_old_entity.add_spo(best_predicate, new_entity)
                else:
                    new_entity.add_spo(best_predicate, best_old_entity)


        # at least every subject which is not filtered (i.e. has just the rdf:type SPO) is a target
        # as a consequence we also have to get the default properties for that target entity, since we do not want to extract its IRI
        for entity in new_context.all_entities:
            if entity.count_spos() == 0:
                for target_property in entity.entity_descriptor.default_properties:
                    # if the object of an SPO is None, it means it is not filtered, therefore the user wants to extract this information
                    entity.add_spo(target_property, None)

        """
        Remove any SPO of the old entities that was a target, unless the property is one of the defaults of the object!
        If the entity has no more SPOs then it means that it became useless and should not be added.
        If the entity has at least one SPO remaining then it means it has been connected to something of the new context

        e.g. Before, we selected the name and plot of some movies. Afterwards we ask for the director. The name of the movie should stay because it is part of the default props.
               The plot must disappear because no new entity has been connected to the plot (assuming the plot was an IRI and not a literal).

        TODO: problem when the relationship is reversed (look above, when should_reverse=True).
                Because then we maybe remove all SPOs of an old entity that has been successfully linked with a new entity!
        """
        for entity in last_context.all_entities:
            # Pseudo-code for what is missing here
            # if entity is_object_of any_new_entity:
            #   new_context.all_entities.append(entity)
            #   continue (skip the rest of the for-loop)
            # TODO: make an example of when this would happen (should_reverse == True), but yeah: when does should_reverse happen?
            # e.g. 'Show me the age of Brad Pitt' -- then --> 'Show movies starring him' ==> should_reverse = true

            for spo in entity.spos:
                if spo.o is None:
                    pass
                    #TODO: this partially works
                    #entity.spos.remove(spo)
            if len(entity.spos) > 0:
                new_context.all_entities.append(entity)
            else:
                # TODO: the entity with no more SPOs should be removed at all. However, have a look at the todo above this for-cycle
                pass
        return new_context
Example #16
 def get_pc(self):
     return Context().arch.get_pc()
Example #17
 def specialize(self, d={}, **kw):
     if kw:
         kw.update(d)
         d = kw
     st, dsql, args = self.fn(Context(d, full_eval=False))
     return DynSql(dsql, args, self.env)
Example #18
        refresh_rate = params["rate"]
    else:
        refresh_rate = 300

    v_things = []
    for city in cities:
        for sens in sensors:
            thing = sens["thing"]
            label = sens["thing"] + " in " + str(city)
            identifier = thing_visor_ID + "/" + city+sens["id"]
            description = sens["description"]
            topic = v_thing_prefix + "/" + identifier
            v_things.append({"vThing": {"label": label, "id": identifier, "description": description},
                             "topic": v_thing_prefix + "/" + identifier, "type": sens["type"],
                             "dataType": sens["dataType"], "city": city, "thing": thing})
            contexts[identifier] = Context()

    port_mapping = db[thing_visor_collection].find_one({"thingVisorID": thing_visor_ID}, {"port": 1, "_id": 0})
    print("port mapping: " + str(port_mapping))

    data_thread = FetcherThread()  # Thread used to fetch data
    data_thread.start()

    mqtt_control_thread = mqttControlThread()  # mqtt control thread
    mqtt_control_thread.start()

    mqtt_data_thread = mqttDataThread()  # mqtt data thread
    mqtt_data_thread.start()
    while True:
        try:
            time.sleep(3)
Example #19
    def __init__(self, parent, config_yaml):
        super(QtDandere2xThread, self).__init__(parent)

        context = Context(config_yaml)
        self.dandere2x = Dandere2x(context)
Example #20
    pos = validate_input('position', linked_list.length_of_linked_list())
    file_name = validate_file_input()
    linked_list = context.do(linked_list, pos, file_name)
    return linked_list


linked_list_first = LinkedList()
linked_list_second = LinkedList()

linked_list_first.event.add(Observer('add', Logger.write_to_file))
linked_list_first.event.add(Observer('delete', Logger.write_to_file))

linked_list_second.event.add(Observer('add', Logger.write_to_file))
linked_list_second.event.add(Observer('delete', Logger.write_to_file))

context = Context(1)

print(' linked_list_first: ')
linked_list_first = create_first_strategy(linked_list_first)
context.change_strategy(2)
print('linked_list_second: ')
linked_list_second = create_second_strategy(linked_list_second)


def countprimeee(linked_list: LinkedList):
    linked_list.countPrime()


while True:
    print('''Choose option:
    1.) first Strategy for adding in list
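Example #20 drives a Strategy-pattern `Context` through `Context(1)`, `change_strategy(2)` and `context.do(...)`. A hedged sketch of what such a Context might look like; the strategy callables are stubs because the real add/delete strategies are not shown in the snippet.

class Context:
    # Strategy-pattern context: an integer id selects the active strategy.
    def __init__(self, strategy_id):
        self.change_strategy(strategy_id)

    def change_strategy(self, strategy_id):
        # Stub strategies keyed by id; the real ones presumably add to or delete
        # from the linked list and log via the observers set up above.
        strategies = {
            1: lambda linked_list, pos, file_name: linked_list,
            2: lambda linked_list, pos, file_name: linked_list,
        }
        self._strategy = strategies[strategy_id]

    def do(self, linked_list, pos, file_name):
        return self._strategy(linked_list, pos, file_name)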
Example #21
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""

import numpy as np
import tensorflow as tf

from model import Skipgram
from context import Context

if __name__ == "__main__":
    context_file_path = "data/info.txt"

    c = Context(context_file_path)
    sc = c.spatial_context(delta=0.02)
    # sc = c.true_context()

    for pair in sc[0:5]:
        print pair

    input_data, label_data = c.target_context_pairs(sc)
    init_embed = np.loadtxt("resource/embeddings/docs/trigram-tfidf-vecs.txt",
                            delimiter=",")
    event_size = init_embed.shape[0]
    embed_size = init_embed.shape[1]
    print "embed size", embed_size
    print "data size", len(input_data)

    event2vec = Skipgram(event_size,
                         embed_size,
Example #22
import logging
import os

import tensorflow as tf

from context import Context
from transformer.sim_hparams import SimHparams as Hparams
from transformer.sim_model import SimTransformer
from transformer.utils import load_hparams, save_operation_specs

logger = logging.getLogger()
logger.setLevel(logging.INFO)

os.environ['CUDA_VISIBLE_DEVICES'] = "-1"

logging.info("# hparams")
hparams = Hparams()
parser = hparams.parser
hp = parser.parse_args()
load_hparams(hp, hp.logdir)
context = Context(hp)

m = SimTransformer(context)
xs = (tf.placeholder(dtype=tf.int32, shape=[None, None], name="xs"), tf.constant(0), tf.constant(0))
vec = m.get_encodec(xs)

saver = tf.train.Saver()
with tf.Session() as sess:
    ckpt = tf.train.latest_checkpoint(hp.logdir)
    saver.restore(sess, ckpt)
    graph_def = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, output_node_names=["seq2vec/vec"])
    tf.train.write_graph(graph_def, './model', '%s.pb' % hp.pb_name, as_text=False)
    save_operation_specs(os.path.join("./model", '%s.ops' % hp.pb_name))
        help="Output the raw probabilties from the network instead of converting them to a segmentation map"
    )
    parser.set_defaults(output_probabilities=False)
    args = parser.parse_args()

    if args.device.startswith("cuda"):
        if torch.cuda.is_available():
            device = torch.device(args.device)
        else:
            device = torch.device("cpu")
            print("cuda not available, switched to cpu")
    else:
        device = torch.device(args.device)
    print("using device", device)

    context = Context(device, file_name=args.model_path, variables=dict(DATASET_FOLDER=args.dataset_path))

    # Fix torchio deprecating something...
    fixed_transform = tio.Compose([
        tio.Crop((62, 62, 70, 58, 0, 0)),
        tio.RescaleIntensity((-1, 1), (0.5, 99.5)),
        tio.Pad((0, 0, 0, 0, 2, 2)),
        tio.ZNormalization(),
    ])
    context.dataset.subjects_dataset.subject_dataset.set_transform(fixed_transform)

    if args.out_folder != "" and not os.path.exists(args.out_folder):
        print(args.out_folder, "does not exist. Creating it.")
        os.makedirs(args.out_folder)

    total = len(context.dataset) // 2
Example #24
    e = Expression('not ("500" matches /\\d+/)', c)
    print(e())

    e = Expression('bar["a"]', c)
    print(e())

    # c.push_frame('word')


#    from time import time
#    start = time()
#    e = c.compile("None")
#    print e()
# for _ in xrange(10000):
#    e()

if __name__ == "__main__":
    # main()

    from moya.context import Context

    c = Context({"name": "Will"})
    c[".develop"] = True
    print(c.eval("{upper:name}"))

    print(c.eval('{upper:name}(name="will")'))

    exp = """list:map:[['one', 'two', 'three'], {upper:$$}]"""
    print(c.eval(exp))
Example #25
 def init_new_game(self):
     self.context = Context()
     self.example_manager.remove()
     self.model_manager.remove()
Example #26
 def __init__(self):
     self._opts = {}
     self._in_robot = Context().in_robot()
     if self._new_called == 1:
         self._populate_opts(self._in_robot)
Example #27
def image_builder(buildspec):
    FORMATTER = OutputFormatter(constants.PADDING)

    BUILDSPEC = Buildspec()
    BUILDSPEC.load(buildspec)
    IMAGES = []

    if "huggingface" in str(BUILDSPEC["framework"]):
        os.system("echo login into public ECR")
        os.system(
            "aws ecr get-login-password --region us-west-2 | docker login --username AWS --password-stdin 763104351884.dkr.ecr.us-west-2.amazonaws.com"
        )

    for image_name, image_config in BUILDSPEC["images"].items():
        ARTIFACTS = deepcopy(
            BUILDSPEC["context"]) if BUILDSPEC.get("context") else {}

        extra_build_args = {}
        labels = {}
        enable_datetime_tag = parse_dlc_developer_configs(
            "build", "datetime_tag")

        if image_config.get("version") is not None:
            if BUILDSPEC["version"] != image_config.get("version"):
                continue

        if image_config.get("context") is not None:
            ARTIFACTS.update(image_config["context"])

        build_context = os.getenv("BUILD_CONTEXT")
        image_tag = (tag_image_with_pr_number(image_config["tag"])
                     if build_context == "PR" else image_config["tag"])
        if enable_datetime_tag or build_context != "PR":
            image_tag = tag_image_with_datetime(image_tag)
        image_repo_uri = (image_config["repository"] if build_context == "PR"
                          else modify_repository_name_for_context(
                              str(image_config["repository"]), build_context))
        base_image_uri = None
        if image_config.get("base_image_name") is not None:
            base_image_object = _find_image_object(
                IMAGES, image_config["base_image_name"])
            base_image_uri = base_image_object.ecr_url

        if image_config.get("download_artifacts") is not None:
            for artifact_name, artifact in image_config.get(
                    "download_artifacts").items():
                type = artifact["type"]
                uri = artifact["URI"]
                var = artifact["VAR_IN_DOCKERFILE"]

                try:
                    file_name = utils.download_file(uri, type).strip()
                except ValueError:
                    FORMATTER.print(
                        f"Artifact download failed: {uri} of type {type}.")

                ARTIFACTS.update({
                    f"{artifact_name}": {
                        "source":
                        f"{os.path.join(os.sep, os.path.abspath(os.getcwd()), file_name)}",
                        "target": file_name
                    }
                })

                extra_build_args[var] = file_name
                labels[var] = file_name
                labels[f"{var}_URI"] = uri

        if str(BUILDSPEC["framework"]).startswith("huggingface"):
            if "transformers_version" in image_config:
                extra_build_args["TRANSFORMERS_VERSION"] = image_config.get(
                    "transformers_version")
            else:
                raise KeyError(
                    f"HuggingFace buildspec.yml must contain 'transformers_version' field for each image"
                )
            if "datasets_version" in image_config:
                extra_build_args["DATASETS_VERSION"] = image_config.get(
                    "datasets_version")
            elif str(image_config["image_type"]) == "training":
                raise KeyError(
                    f"HuggingFace buildspec.yml must contain 'datasets_version' field for each image"
                )

        ARTIFACTS.update({
            "dockerfile": {
                "source": image_config["docker_file"],
                "target": "Dockerfile",
            }
        })

        context = Context(ARTIFACTS, f"build/{image_name}.tar.gz",
                          image_config["root"])

        if "labels" in image_config:
            labels.update(image_config.get("labels"))
        """
        Override parameters from parent in child.
        """

        info = {
            "account_id": str(BUILDSPEC["account_id"]),
            "region": str(BUILDSPEC["region"]),
            "framework": str(BUILDSPEC["framework"]),
            "version": str(BUILDSPEC["version"]),
            "root": str(image_config["root"]),
            "name": str(image_name),
            "device_type": str(image_config["device_type"]),
            "python_version": str(image_config["python_version"]),
            "image_type": str(image_config["image_type"]),
            "image_size_baseline": int(image_config["image_size_baseline"]),
            "base_image_uri": base_image_uri,
            "labels": labels,
            "extra_build_args": extra_build_args
        }

        image_object = DockerImage(
            info=info,
            dockerfile=image_config["docker_file"],
            repository=image_repo_uri,
            tag=image_tag,
            to_build=image_config["build"],
            context=context,
        )

        IMAGES.append(image_object)

    FORMATTER.banner("DLC")
    FORMATTER.title("Status")

    THREADS = {}

    # In the context of the ThreadPoolExecutor each instance of image.build submitted
    # to it is executed concurrently in a separate thread.
    with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
        # Standard images must be built before example images
        # Example images will use standard images as base
        standard_images = [
            image for image in IMAGES if "example" not in image.name.lower()
        ]
        example_images = [
            image for image in IMAGES if "example" in image.name.lower()
        ]

        for image in standard_images:
            THREADS[image.name] = executor.submit(image.build)

        # the FORMATTER.progress(THREADS) function call also waits until all threads have completed
        FORMATTER.progress(THREADS)

        for image in example_images:
            THREADS[image.name] = executor.submit(image.build)

        # the FORMATTER.progress(THREADS) function call also waits until all threads have completed
        FORMATTER.progress(THREADS)

        FORMATTER.title("Build Logs")

        if not os.path.isdir("logs"):
            os.makedirs("logs")

        for image in IMAGES:
            FORMATTER.title(image.name)
            FORMATTER.table(image.info.items())
            FORMATTER.separator()
            FORMATTER.print_lines(image.log)
            with open(f"logs/{image.name}", "w") as fp:
                fp.write("\n".join(image.log))
                image.summary["log"] = f"logs/{image.name}"

        FORMATTER.title("Summary")

        for image in IMAGES:
            FORMATTER.title(image.name)
            FORMATTER.table(image.summary.items())

        FORMATTER.title("Errors")
        is_any_build_failed = False
        is_any_build_failed_size_limit = False
        for image in IMAGES:
            if image.build_status == constants.FAIL:
                FORMATTER.title(image.name)
                FORMATTER.print_lines(image.log[-10:])
                is_any_build_failed = True
            else:
                if image.build_status == constants.FAIL_IMAGE_SIZE_LIMIT:
                    is_any_build_failed_size_limit = True
        if is_any_build_failed:
            raise Exception("Build failed")
        else:
            if is_any_build_failed_size_limit:
                FORMATTER.print("Build failed. Image size limit breached.")
            else:
                FORMATTER.print("No errors")

        FORMATTER.title("Uploading Metrics")
        metrics = Metrics(
            context=constants.BUILD_CONTEXT,
            region=BUILDSPEC["region"],
            namespace=constants.METRICS_NAMESPACE,
        )
        for image in IMAGES:
            try:
                metrics.push_image_metrics(image)
            except Exception as e:
                if is_any_build_failed or is_any_build_failed_size_limit:
                    raise Exception(f"Build failed.{e}")
                else:
                    raise Exception(f"Build passed. {e}")

        if is_any_build_failed_size_limit:
            raise Exception("Build failed because of file limit")

        FORMATTER.separator()

        # Set environment variables to be consumed by test jobs
        test_trigger_job = utils.get_codebuild_project_name()
        utils.set_test_env(
            IMAGES,
            BUILD_CONTEXT=os.getenv("BUILD_CONTEXT"),
            TEST_TRIGGER=test_trigger_job,
        )
Example #28
def fb(ctx, path, router, fakepath):
    context = Context(path, router, fakepath)
    context.jsonConfig = load_json_config(context)
    ctx.obj = context
Example #29
 def __init__(self, config_json):
     self.config_json = config_json
     self.context = Context(config_json)
Example #30
def image_builder(buildspec):

    BUILDSPEC = Buildspec()
    BUILDSPEC.load(buildspec)
    PRE_PUSH_STAGE_IMAGES = []
    COMMON_STAGE_IMAGES = []

    if "huggingface" in str(BUILDSPEC["framework"]) or "autogluon" in str(
            BUILDSPEC["framework"]) or "trcomp" in str(BUILDSPEC["framework"]):
        os.system("echo login into public ECR")
        os.system(
            "aws ecr get-login-password --region us-west-2 | docker login --username AWS --password-stdin 763104351884.dkr.ecr.us-west-2.amazonaws.com"
        )

    for image_name, image_config in BUILDSPEC["images"].items():
        ARTIFACTS = deepcopy(
            BUILDSPEC["context"]) if BUILDSPEC.get("context") else {}

        extra_build_args = {}
        labels = {}
        enable_datetime_tag = parse_dlc_developer_configs(
            "build", "datetime_tag")

        if image_config.get("version") is not None:
            if BUILDSPEC["version"] != image_config.get("version"):
                continue

        if image_config.get("context") is not None:
            ARTIFACTS.update(image_config["context"])

        image_tag = tag_image_with_pr_number(
            image_config["tag"]
        ) if build_context == "PR" else image_config["tag"]
        if enable_datetime_tag or build_context != "PR":
            image_tag = tag_image_with_datetime(image_tag)
        image_repo_uri = (image_config["repository"] if build_context == "PR"
                          else modify_repository_name_for_context(
                              str(image_config["repository"]), build_context))
        base_image_uri = None
        if image_config.get("base_image_name") is not None:
            base_image_object = _find_image_object(
                PRE_PUSH_STAGE_IMAGES, image_config["base_image_name"])
            base_image_uri = base_image_object.ecr_url

        if image_config.get("download_artifacts") is not None:
            for artifact_name, artifact in image_config.get(
                    "download_artifacts").items():
                type = artifact["type"]
                uri = artifact["URI"]
                var = artifact["VAR_IN_DOCKERFILE"]

                try:
                    file_name = utils.download_file(uri, type).strip()
                except ValueError:
                    FORMATTER.print(
                        f"Artifact download failed: {uri} of type {type}.")

                ARTIFACTS.update({
                    f"{artifact_name}": {
                        "source":
                        f"{os.path.join(os.sep, os.path.abspath(os.getcwd()), file_name)}",
                        "target": file_name,
                    }
                })

                extra_build_args[var] = file_name
                labels[var] = file_name
                labels[f"{var}_URI"] = uri

        transformers_version = image_config.get("transformers_version")

        if str(BUILDSPEC["framework"]).startswith("huggingface") or str(
                BUILDSPEC["framework"]).endswith("trcomp"):
            if transformers_version:
                extra_build_args["TRANSFORMERS_VERSION"] = transformers_version
            else:
                raise KeyError(
                    f"HuggingFace buildspec.yml must contain 'transformers_version' field for each image"
                )
            if "datasets_version" in image_config:
                extra_build_args["DATASETS_VERSION"] = image_config.get(
                    "datasets_version")
            elif str(image_config["image_type"]) == "training":
                raise KeyError(
                    f"HuggingFace buildspec.yml must contain 'datasets_version' field for each image"
                )

        ARTIFACTS.update({
            "dockerfile": {
                "source": image_config["docker_file"],
                "target": "Dockerfile",
            }
        })

        context = Context(ARTIFACTS, f"build/{image_name}.tar.gz",
                          image_config["root"])

        if "labels" in image_config:
            labels.update(image_config.get("labels"))
            for label, value in labels.items():
                if isinstance(value, bool):
                    labels[label] = str(value)

        cx_type = utils.get_label_prefix_customer_type(image_tag)

        # Define label variables
        label_framework = str(BUILDSPEC['framework']).replace('_', '-')
        if image_config.get("framework_version"):
            label_framework_version = str(
                image_config['framework_version']).replace('.', '-')
        else:
            label_framework_version = str(BUILDSPEC['version']).replace(
                '.', '-')
        label_device_type = str(image_config['device_type'])
        if label_device_type == "gpu":
            label_device_type = f"{label_device_type}.{str(image_config['cuda_version'])}"
        label_arch = str(BUILDSPEC['arch_type'])
        label_python_version = str(image_config['tag_python_version'])
        label_os_version = str(image_config.get('os_version')).replace(
            '.', '-')
        label_contributor = str(BUILDSPEC.get('contributor'))
        label_transformers_version = str(transformers_version).replace(
            '.', '-')

        # job_type will be either inference or training, based on the repo URI
        if "training" in image_repo_uri:
            label_job_type = "training"
        elif "inference" in image_repo_uri:
            label_job_type = "inference"
        else:
            raise RuntimeError(
                f"Cannot find inference or training job type in {image_repo_uri}. "
                f"This is required to set job_type label.")

        if cx_type == "sagemaker":
            # Adding standard labels to all images
            labels[
                f"com.amazonaws.ml.engines.{cx_type}.dlc.framework.{label_framework}.{label_framework_version}"] = "true"
            labels[
                f"com.amazonaws.ml.engines.{cx_type}.dlc.device.{label_device_type}"] = "true"
            labels[
                f"com.amazonaws.ml.engines.{cx_type}.dlc.arch.{label_arch}"] = "true"
            # python version label will look like py_version.py36, for example
            labels[
                f"com.amazonaws.ml.engines.{cx_type}.dlc.python.{label_python_version}"] = "true"
            labels[
                f"com.amazonaws.ml.engines.{cx_type}.dlc.os.{label_os_version}"] = "true"

            labels[
                f"com.amazonaws.ml.engines.{cx_type}.dlc.job.{label_job_type}"] = "true"

            if label_contributor:
                labels[
                    f"com.amazonaws.ml.engines.{cx_type}.dlc.contributor.{label_contributor}"] = "true"
            if transformers_version:
                labels[
                    f"com.amazonaws.ml.engines.{cx_type}.dlc.lib.transformers.{label_transformers_version}"] = "true"
        """
        Override parameters from parent in child.
        """

        info = {
            "account_id":
            str(BUILDSPEC["account_id"]),
            "region":
            str(BUILDSPEC["region"]),
            "framework":
            str(BUILDSPEC["framework"]),
            "version":
            str(BUILDSPEC["version"]),
            "root":
            str(image_config["root"]),
            "name":
            str(image_name),
            "device_type":
            str(image_config["device_type"]),
            "python_version":
            str(image_config["python_version"]),
            "image_type":
            str(image_config["image_type"]),
            "image_size_baseline":
            int(image_config["image_size_baseline"]),
            "base_image_uri":
            base_image_uri,
            "enable_test_promotion":
            image_config.get("enable_test_promotion", True),
            "labels":
            labels,
            "extra_build_args":
            extra_build_args,
        }

        # Create pre_push stage docker object
        pre_push_stage_image_object = DockerImage(
            info=info,
            dockerfile=image_config["docker_file"],
            repository=image_repo_uri,
            tag=append_tag(image_tag, "pre-push"),
            to_build=image_config["build"],
            stage=constants.PRE_PUSH_STAGE,
            context=context,
            additional_tags=[image_tag],
            target=image_config.get("target"),
        )

        ##### Create Common stage docker object #####
        # If for a pre_push stage image we create a common stage image, then we do not push the pre_push stage image
        # to the repository. Instead, we just push its common stage image to the repository. Therefore,
        # inside function get_common_stage_image_object we make pre_push_stage_image_object non pushable.
        common_stage_image_object = generate_common_stage_image_object(
            pre_push_stage_image_object, image_tag)
        COMMON_STAGE_IMAGES.append(common_stage_image_object)

        PRE_PUSH_STAGE_IMAGES.append(pre_push_stage_image_object)
        FORMATTER.separator()

    FORMATTER.banner("DLC")

    # Parent images do not inherit from any containers built in this job
    # Child images use one of the parent images as their base image
    parent_images = [
        image for image in PRE_PUSH_STAGE_IMAGES if not image.is_child_image
    ]
    child_images = [
        image for image in PRE_PUSH_STAGE_IMAGES if image.is_child_image
    ]
    ALL_IMAGES = PRE_PUSH_STAGE_IMAGES + COMMON_STAGE_IMAGES
    IMAGES_TO_PUSH = [
        image for image in ALL_IMAGES if image.to_push and image.to_build
    ]

    pushed_images = []
    pushed_images += process_images(parent_images, "Parent/Independent")
    pushed_images += process_images(child_images, "Child/Dependent")

    assert all(image in pushed_images
               for image in IMAGES_TO_PUSH), "Some images could not be pushed."

    # After the build, display logs/summary for all the images.
    FORMATTER.banner("Summary")
    show_build_info(ALL_IMAGES)

    FORMATTER.banner("Errors")
    is_any_build_failed, is_any_build_failed_size_limit = show_build_errors(
        ALL_IMAGES)

    # From all images, filter the images that were supposed to be built and upload their metrics
    BUILT_IMAGES = [image for image in ALL_IMAGES if image.to_build]

    FORMATTER.banner("Upload Metrics")
    upload_metrics(BUILT_IMAGES, BUILDSPEC, is_any_build_failed,
                   is_any_build_failed_size_limit)

    FORMATTER.banner("Test Env")
    # Set environment variables to be consumed by test jobs
    test_trigger_job = get_codebuild_project_name()
    # Tests should only run on images that were pushed to the repository
    if not is_build_enabled():
        # Ensure we have images populated if do_build is false, so that tests can proceed if needed
        images_to_test = [image for image in ALL_IMAGES if image.to_push]
    else:
        images_to_test = IMAGES_TO_PUSH

    utils.set_test_env(
        images_to_test,
        use_latest_additional_tag=True,
        BUILD_CONTEXT=os.getenv("BUILD_CONTEXT"),
        TEST_TRIGGER=test_trigger_job,
    )