def __init__(self, parent):
    self.parent = parent
    if parent is None:
        self.ctx = context.Context()
    else:
        self.ctx = context.Context(parent.get_ctx())
def main():
    parser = argparse.ArgumentParser(description="The interpreter for Pluto")
    parser.add_argument("-f", "--file", action="store", dest="file", type=str,
                        help="the file to execute")
    parser.add_argument("-p", "--parse", action="store_true", default=False,
                        help="just parse the file - don't execute it")
    parser.add_argument("-t", "--tree", action="store_true", default=False,
                        help="print the parse tree")
    parser.add_argument("-i", "--interactive", action="store_true", default=False,
                        help="enter interactive mode after the file has been run")
    parser.add_argument("-n", "--no-prelude", action="store_true", dest="no_prelude",
                        help="don't load the prelude")
    parser.add_argument("-v", "--version", action="version",
                        version="Pluto, early beta version")
    args = parser.parse_args()

    if args.file is None:
        ctx = c.Context()
        if not args.no_prelude:
            import_prelude(ctx)
        repl(ctx)
    else:
        try:
            text = open(args.file).read()
            if args.parse or args.tree:
                tokens = l.lex(text)
                parse = p.Parser(tokens)
                program = parse.parse_program()
                if len(parse.errors) > 0:
                    parse.print_errors()
                elif args.tree:
                    print(program)
                return
            ctx = c.Context()
            if not args.no_prelude:
                import_prelude(ctx)
            execute(text, False, ctx)
            if args.interactive:
                print()
                repl(ctx)
        except FileNotFoundError:
            print("File not found: %s" % args.file)
            return
def get_group_key(self, field_groups, alias_groups, select_context,
                  alias_group_result_context, index):
    """Computes a singleton context with the values for a group key.

    The evaluation has already been done; this method just selects the values
    out of the right contexts.

    Arguments:
        field_groups: A list of ColumnRefs for the field groups to use.
        alias_groups: A list of strings of alias groups to use.
        select_context: A context with the data for the table expression being
            selected from.
        alias_group_result_context: A context with the data for the grouped-by
            select fields.
        index: The row index to use from each context.
    """
    result_columns = collections.OrderedDict()
    for field_group in field_groups:
        column_key = (field_group.table, field_group.column)
        source_column = select_context.columns[column_key]
        result_columns[column_key] = context.Column(
            # TODO(Samantha): This shouldn't just be nullable.
            type=source_column.type,
            mode=tq_modes.NULLABLE,
            values=[source_column.values[index]])
    for alias_group in alias_groups:
        column_key = (None, alias_group)
        source_column = alias_group_result_context.columns[column_key]
        result_columns[column_key] = context.Column(
            # TODO(Samantha): This shouldn't just be nullable.
            type=source_column.type,
            mode=tq_modes.NULLABLE,
            values=[source_column.values[index]])
    return context.Context(1, result_columns, None)
def main():
    # __init()
    print("start...")
    config_path = os.path.abspath(
        os.path.join(os.path.dirname(__file__), 'config.json'))
    with open(config_path, 'r') as f:
        conf = json.load(f)
    logger.info(conf)

    # discovery.run()
    dataReporter = reporter.DataReporter(conf['reportUrl'])
    ctx = context.Context(conf, dataReporter)
    context.setContext(ctx)

    global discovery
    discovery = discover.Discover('test')
    devInfos = discovery.discover()

    global manager
    manager = device.DeviceManager()
    manager.addDevices(devInfos)

    global exit
    exit = False
    loop_index = 0
    while not exit:
        time.sleep(5)
        manager.monitorProcs(devInfos)
        loop_index = loop_index + 1
        if loop_index > 60:
            loop_index = 0
            devInfos = refresh(manager, devInfos)
def run(args):
    e = event.read_event(args.event)
    c = context.Context(args.timeout, args.arn_string, args.version_name)
    if args.library is not None:
        load_lib(args.library)
    request_id = uuid.uuid4()
    func = load(request_id, args.file, args.function)
    logger = logging.getLogger()
    result = None

    logger.info("Event: {}".format(e))
    logger.info("START RequestId: {}".format(request_id))

    start_time = timeit.default_timer()
    result, err_type = execute(func, e, c)
    end_time = timeit.default_timer()

    logger.info("END RequestId: {}".format(request_id))

    if type(result) is TimeoutException:
        logger.error("RESULT:\n{}".format(result))
    else:
        logger.info("RESULT:\n{}".format(result))

    duration = "{0:.2f} ms".format((end_time - start_time) * 1000)
    logger.info("REPORT RequestId: {}\tDuration: {}".format(
        request_id, duration))

    if err_type is not None:
        sys.exit(EXITCODE_ERR)
def main():
    try:
        path = os.environ['PATH_INFO']
    except KeyError:
        path = None
    else:
        if path.startswith('/files/'):
            main_file(path[7:])

    ctxt = context.ctxt = context.Context()

    # Insert the environment (CGI) variables
    ctxt.update(os.environ)

    # Update with defaults from the config
    ctxt.update(config.defaults)

    # Update with all cookies
    for c in Cookie.SimpleCookie(os.environ.get('HTTP_COOKIE', '')).values():
        ctxt[c.key] = c.value

    form = context.form = load_form()
    ctxt.update(form)

    # Override certain context values based on configured settings
    ctxt[ALLOWRAW] = config.allowraw
    ctxt[FILESPREFIX] = config.filesprefix or os.environ['SCRIPT_NAME'] + '/files/'

    if path is not None:
        main_path(ctxt, path)
    else:
        main_form(ctxt)
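A short, hedged sketch of the cookie parsing used above: the snippet imports the Python 2 `Cookie` module, whose Python 3 counterpart is `http.cookies`; the cookie header string below is invented.

# Runnable sketch of the cookie loop above (standard library only).
from http import cookies

jar = cookies.SimpleCookie("session=abc123; theme=dark")  # invented cookie header
for c in jar.values():
    print(c.key, c.value)  # each morsel exposes .key and .value, as used above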
def infer(expr, type=None, ctxt=None):
    """Infer the type of an expression and return the pair
    (type, proof obligations) or raise an exception of type ExprTypeError.

    Arguments:
    - `expr`: an expression
    """
    if ctxt is None:
        ty_ctxt_name = fresh_name.get_name('_ty_ctxt')
        ty_ctxt = context.Context(ty_ctxt_name)
    else:
        ty_ctxt = ctxt
    prf_obl_name = fresh_name.get_name('_ty_goals')
    prf_obl = goals.empty_goals(prf_obl_name, ty_ctxt)
    # Slight hack here: we compare pointers to avoid calling the
    # __eq__ method of type, which may be overloaded.
    # There should only be one instance of the None object,
    # so pointer equality is valid.
    if type is None:
        ty = ExprInfer().visit(expr, prf_obl)
        return (ty, prf_obl)
    else:
        if ExprCheck().visit(expr, type, prf_obl):
            return (type, prf_obl)
        else:
            mess = "Expected {0!s} to be of type {1!s}".format(expr, type)
            raise ExprTypeError(mess, expr)
def empty_context_from_select_fields(self, select_fields):
    return context.Context(
        0,
        collections.OrderedDict(
            ((None, select_field.alias),
             context.Column(select_field.expr.type, []))
            for select_field in select_fields),
        None)
def delete_item():
    ''' Delete keymap action. '''
    import context
    context.Context(delete=True)
def _initialise_context(self, ctx, session=None):
    if not isinstance(ctx, context.Context):
        raise TypeError("item to add is not a saga.Context instance")

    # create a deep copy of the context (this keeps _adaptor etc.)
    ctx_clone = context.Context(ctx.type)
    ctx._attributes_deep_copy(ctx_clone)

    if not session:
        session = self._session
        logger = self._logger
    else:
        logger = session._logger

    # try to initialize that context, i.e. evaluate its attributes and
    # infer additional runtime information as needed
    # logger.debug("adding context : %s" % (ctx_clone))

    if not session:
        logger.warning("cannot initialize context - no session: %s"
                       % (ctx_clone))
    else:
        try:
            ctx_clone._initialize(session)
        except se.SagaException as e:
            msg = "Cannot add context, initialization failed (%s)" % str(e)
            raise se.BadParameter(msg)

    return ctx_clone
def _init_if_needed():
    global _ctx, _lib
    if _ctx is not None:
        return
    _ctx = context.Context()
    __lbuild = context.CPPLibBuilder(_ctx)
    ptr = cppinl.CHandle(long)
    dptr = cppinl.CHandle(float)
    __lbuild.decl_func('long_alloc', r'''
        p = new int64_t[len];
    ''', p=ptr, len=long)
    __lbuild.decl_func('long_free', r'''
        delete [] p;
    ''', p=ptr)
    __lbuild.decl_func('dub_alloc', r'''
        p = new double[len];
    ''', p=dptr, len=long)
    __lbuild.decl_func('dub_free', r'''
        delete [] p;
    ''', p=dptr)
    _lib = __lbuild.make()
def get_contexts(self):
    """Generate a list of contexts based on configuration and test requirements"""
    reqs = req.Requirements()
    if not reqs.check_if_all_requirements_are_met(self.tc):
        return []

    conf_params = self._get_configured_params()
    common_params = self._get_common_params()
    ctx_arg_keys = list(conf_params.keys()) + list(common_params.keys())

    builds = self._get_builds()
    ctx_params = itertools.product(builds, *conf_params.values(),
                                   *common_params.values())

    ctxs = []
    for cp in ctx_params:
        build = cp[0]
        kwargs = dict(zip(ctx_arg_keys, cp[1:]))
        c = ctx.Context(build, **kwargs)
        c.cwd = self.tc.cwd
        ctxs.append(c)

    return ctxs
def getSampleSentences(self):
    self.contexts = context.Context()
    self.userid = self.contexts.getApiKey()
    operations = self.getUserDBconnection()
    data = operations.find({"_id": self.userid})
    if data.count() == 0:
        raise exceptions.InvalidApiKey("Invalid ApiKey")
    else:
        data_dict = {}
        for i in data:
            data_dict = i
        SampleSentences = list()
        for i in range(0, len(data_dict["actions"])):
            gather = list()
            isstring = list()
            if len(data_dict["actions"]) != 0:
                self.ActionNames.append(data_dict["actions"][i]["name"])
                self.ParamsNeeded.append(data_dict["actions"][i]["param_def"])
                for j in range(0, len(data_dict["actions"][i]["sentence_def"])):
                    gather.append(
                        data_dict["actions"][i]["sentence_def"][j]["sentence"].strip())
                    isstring.append(
                        data_dict["actions"][i]["sentence_def"][j]["isstring"])
            SampleSentences.append(gather)
            self.IsString.append(isstring)
        print(SampleSentences)
        self.DBSentences = SampleSentences
        return SampleSentences
def createContext(node, lang):
    lookaheadTags = None
    siblingTags = None
    parentTags = None
    gpTags = None

    if "children" in node:
        lookaheadTags = []
        for child in node["children"]:
            if "tags" in child:
                lookaheadTags += child["tags"]
        if lookaheadTags is not None and len(lookaheadTags) == 0:
            lookaheadTags = None

    if "parent" in node and "children" in node["parent"]:
        siblingTags = []
        for child in node["parent"]["children"]:
            if "tags" in child and not tagsMatch(child, node, node["parent"],
                                                 node["parent"], lang, False, 0, 0):
                siblingTags += child["tags"]
        if siblingTags is not None and len(siblingTags) == 0:
            siblingTags = None

    if "parent" in node and "tags" in node["parent"]:
        parentTags = node["parent"]["tags"]

    if "parent" in node and "parent" in node["parent"] and "tags" in node["parent"]["parent"]:
        gpTags = node["parent"]["parent"]["tags"]

    return context.Context(lang, lookaheadTags, siblingTags, parentTags, gpTags)
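A hedged illustration of the node shape createContext() walks; only the key names ("tags", "children", "parent") come from the function above, while the tag values and the "en" language code are invented.

# Invented example data; runnable as-is, but tagsMatch() and the tag values
# are placeholders, not part of the original project.
child = {"tags": ["NOUN"]}
parent = {"tags": ["CLAUSE"]}
node = {"tags": ["VERB"], "children": [child], "parent": parent}
parent["children"] = [node, child]
# createContext(node, "en") would gather lookahead tags from node's children,
# sibling tags from parent's other children (filtered through tagsMatch),
# and parent tags from parent; grandparent tags require parent["parent"].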
def get_contexts(self):
    """Generate a list of contexts based on configuration and test requirements"""
    reqs = req.Requirements()
    if not reqs.check_if_all_requirements_are_met(self.tc):
        return []

    conf_params = self._get_configured_params()
    common_params = self._get_common_params()
    ctx_elem_names = list(conf_params.keys()) + list(common_params.keys())

    builds = self._get_builds()

    # Generate the cartesian product of builds and context elements.
    # Each element of the product serves as a base for a separate
    # context in which the test is run.
    ctx_params = itertools.product(builds, *conf_params.values(),
                                   *common_params.values())

    ctxs = []
    for cp in ctx_params:
        build = cp[0]
        ctx_elems = dict(zip(ctx_elem_names, cp[1:]))
        c = ctx.Context(build, **ctx_elems)
        c.cwd = self.tc.cwd
        ctxs.append(c)

    return ctxs
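A runnable sketch of the parameter expansion both get_contexts() variants rely on: itertools.product combines every build with every configured and common parameter value, and each tuple is zipped back into keyword arguments. All values below are invented.

import itertools

builds = ['debug', 'release']                 # invented build list
conf_params = {'provider': ['pmem', 'dax']}   # invented configured params
common_params = {'tracer': [None]}            # invented common params

names = list(conf_params.keys()) + list(common_params.keys())
for cp in itertools.product(builds, *conf_params.values(), *common_params.values()):
    build, elems = cp[0], dict(zip(names, cp[1:]))
    print(build, elems)  # 4 combinations: 2 builds x 2 providers x 1 tracer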
def prim_proc(compiler, source):
    proc_name = pycode.name('proc')
    tpl = 'def %s(%s_in):\n return $#\n%s' % (proc_name, proc_name, proc_name)
    with context.Context():
        proc_compiler = proc.Compiler(compiler)
        return pycode.create(tpl, proc_compiler.compile(proc_name + '_in', source))
def _internal_simple(path, project_bases, user, root_dir, rietveld_obj):
    """Generates a PendingManager commit queue for chrome/trunk/tools/build."""
    local_checkout = checkout.SvnCheckout(
        root_dir,
        os.path.basename(path),
        user,
        None,
        'svn://svn.chromium.org/chrome/trunk/' + path,
        [chromium_copyright.process])
    context_obj = context.Context(
        rietveld_obj,
        local_checkout,
        async_push.AsyncPush(
            'https://chromium-status.appspot.com/cq',
            _chromium_status_pwd(root_dir)))

    verifiers_no_patch = [
        project_base.ProjectBaseUrlVerifier(project_bases),
        reviewer_lgtm.ReviewerLgtmVerifier(
            _get_chromium_committers(),
            [re.escape(user)]),
    ]
    verifiers = [
        presubmit_check.PresubmitCheckVerifier(context_obj, timeout=900),
    ]

    return pending_manager.PendingManager(
        context_obj,
        verifiers_no_patch,
        verifiers)
def execute(arguments, ui=user_interface.CLI, brk=broker.Broker()):
    """Execute ftw_compatible_tool.

    Arguments:
        - arguments: A string list of the arguments for pywb.
        - ui: A class inherited from user_interface.Interactor.
            This will be used for output and interaction.
        - brk: A Broker object. Pass this into execute if you have
            your own subscriber or publisher.
    """
    ctx = context.Context(brk, delimiter=traffic.Delimiter("magic"))
    ui = ui(ctx)

    brk.publish(broker.TOPICS.SHOW_UI, "welcome")
    brk.publish(broker.TOPICS.SHOW_UI, "tutorial")

    traffic.RawRequestCollector(ctx)
    traffic.RawResponseCollector(ctx)
    traffic.RealTrafficCollector(ctx)
    log.LogCollector(ctx)
    base.Base(ctx)

    args = parse(arguments)

    database.Sqlite3DB(ctx, args.database)

    if args.interact:
        ui.interact()
    else:
        commands = args.execute.strip().split("|")
        for command in commands:
            brk.publish(broker.TOPICS.COMMAND, *tuple(shlex.split(command)))
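A small runnable sketch, not part of ftw_compatible_tool, of how the non-interactive branch above turns the args.execute string into commands: split on "|", then tokenize each piece with shlex.split() before publishing. The command string is invented.

import shlex

execute_arg = 'report | exit'  # invented example of an args.execute value
for command in execute_arg.strip().split("|"):
    print(tuple(shlex.split(command)))  # -> ('report',) then ('exit',)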
def obtener_contexto_desde_configuracion(cp):
    """Builds a Context object from the configuration parameters.

    :param cp: configuration parameters
    """
    ctx = context.Context()
    ctx.home = DBMANAGER_HOME
    ctx.version = DBMANAGER_VERSION
    ctx.instancia = cp.get("TDI", "cola")
    ctx.url = ("%s/tdi/AMMForm?" % cp.get("TDI", "url_formatos"))
    ctx.queue = cp.get("TDI", "cola")
    ctx.user = cp.get("TDI", "user")
    ctx.password = cp.get("TDI", "password")
    ctx.batch_size = cp.get("TDI", "batch_size")
    ctx.client = pymongo.MongoClient(cp.get("MONGO", "uri"))
    ctx.db = ctx.client.get_database(cp.get("MONGO", "db"))
    ctx.debug = cp.getint("MONGO", "debug")
    ctx.sql_engine = sqlalchemy.create_engine(
        cp.get("SQL", "uri"),
        pool_pre_ping=True,
        pool_recycle=int(cp.get("SQL", "recycle")))
    ctx.sql_metadata = sqlalchemy.MetaData(bind=ctx.sql_engine)
    ctx.amqp_dbmanager = cp.get("AMQP", "dbmanager")
    ctx.amqp_monitor = cp.get("AMQP", "monitor")
    return ctx
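A hedged sketch of the configuration layout this function reads. The section and option names come from the cp.get() calls above; every value is invented, and the URIs are deliberately non-functional placeholders.

import configparser

SAMPLE_CONFIG = """
[TDI]
cola = dbmanager_queue
url_formatos = http://example.invalid
user = tdi_user
password = secret
batch_size = 100

[MONGO]
uri = mongodb://localhost:27017
db = dbmanager
debug = 0

[SQL]
uri = sqlite://
recycle = 3600

[AMQP]
dbmanager = dbmanager.queue
monitor = monitor.queue
"""

cp = configparser.ConfigParser()
cp.read_string(SAMPLE_CONFIG)
print(cp.get("TDI", "cola"), cp.getint("MONGO", "debug"))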
def clear(self):
    self.coordinates = np.empty((0, 2), np.float64)
    self.speedinfo = np.empty((0, 2), np.float64)
    self.imu_values = np.empty((0, 9), np.float64)
    self.context = context.Context()
    self.framesMeta = []
    self.annotCars = []
def setUp(self):
    super(TestCase, self).setUp()
    self.mock(breakpad, 'SendStack', self._send_stack_mock)
    self.context = context.Context(RietveldMock(self),
                                   SvnCheckoutMock(self),
                                   AsyncPushMock(self))
    self.pending = pending_manager.PendingCommit(
        42, '*****@*****.**', [], 23, '', 'bleh', [])
def make_context(self, name_type_values_triples):
    num_rows = len(name_type_values_triples[0][2])
    # The constructor does all relevant invariant checks, so we don't have
    # to do that here.
    return context.Context(
        num_rows,
        collections.OrderedDict(
            ((None, name), context.Column(col_type, values))
            for name, col_type, values in name_type_values_triples),
        None)
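A hedged illustration of the input shape make_context() expects: (name, type, values) triples whose value lists all share the same length. The column names, values, and the 'INT'/'STRING' stand-ins for the real type objects are all invented.

name_type_values_triples = [
    ('x', 'INT', [1, 2, 3]),               # invented column
    ('label', 'STRING', ['a', 'b', 'c']),  # invented column
]
num_rows = len(name_type_values_triples[0][2])  # 3, mirroring the computation above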
def _init_context(self, **ctx_params):
    """Initialize context class using provided parameters"""
    fs = ctx_params['fs'](self.config)
    build = ctx_params['build'](self.config)
    return ctx.Context(self, self.config, fs=fs, build=build,
                       valgrind=self.valgrind)
def empty_context_from_select_fields(self, select_fields):
    return context.Context(
        0,
        collections.OrderedDict((
            (None, select_field.alias),
            # TODO(Samantha): This shouldn't just be nullable.
            context.Column(type=select_field.expr.type,
                           mode=tq_modes.NULLABLE,
                           values=[]))
            for select_field in select_fields),
        None)
def check(self, ast):
    try:
        pairs = self.analyze([C.Context()], ast)
        state = C.State([s for s, _ in pairs])
        return U.verify(state)
    except (ValueError, CheckError, T.UnificationError) as e:
        if not self.careful and 'Unsatisfiable constraint' in str(e):
            self.carefully().check(ast)
        else:
            raise
def main():
    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGQUIT, signal_handler)

    initialize_logging()
    ctx = context.Context(initialize_configuration())

    logging.debug("initializing")
    ctx.load_blocks()

    try:
        ctx.event_loop()
    except KeyboardInterrupt:
        logging.debug("received interrupt signal, stopping")

    logging.debug("exiting")
def deserialize_context(self, ctxt):
    rpc_ctxt_dict = ctxt.copy()
    user_id = rpc_ctxt_dict.pop('user_id', None)
    if not user_id:
        user_id = rpc_ctxt_dict.pop('user', None)
    tenant_id = rpc_ctxt_dict.pop('tenant_id', None)
    if not tenant_id:
        tenant_id = rpc_ctxt_dict.pop('project_id', None)
    return context.Context(user_id, tenant_id,
                           load_admin_roles=False, **rpc_ctxt_dict)
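A hedged example of the serialized context dict deserialize_context() accepts: it tolerates either the newer keys (user_id/tenant_id) or the legacy ones (user/project_id) and forwards everything else as keyword arguments. The key values and the extra is_admin field are invented.

serialized = {
    'user': 'alice',       # used because 'user_id' is absent
    'project_id': 'demo',  # used because 'tenant_id' is absent
    'is_admin': False,     # forwarded via **rpc_ctxt_dict (invented field)
}
# ctx = handler.deserialize_context(serialized)  # 'handler' is a hypothetical instance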
def evaluate_select_fields(self, select_fields, ctx):
    """Evaluate a table result given the data the fields have access to.

    Arguments:
        select_fields: A list of typed_ast.SelectField values to evaluate.
        ctx: The "source" context that the expressions can access when
            being evaluated.
    """
    return context.Context(
        ctx.num_rows,
        collections.OrderedDict(
            self.evaluate_select_field(select_field, ctx)
            for select_field in select_fields),
        None)
def _prepare(self):
    self.base = self.psp + 0x10
    if self.symbols:
        logger.debug("loading symbols from %s at %04X", self.symbols, self.base)
        self.ctx = context.Context()
        self.ctx.loadSymbols(self.symbols, self.base)
        try:
            self.dseg = self.ctx.var("__SEG__DATA0")
        except Exception:
            self.dseg = None
    self.dbox = dosbox.Dosbox()
    self.loaded()
def setUp(self):
    super(TestCase, self).setUp()
    self.mock(breakpad, 'SendStack', self._send_stack_mock)
    self.context = context.Context(
        RietveldMock(self),
        SvnCheckoutMock(self),
        AsyncPushMock(self),
        False)
    self.pending = pending_manager.PendingCommit(
        issue=42,
        owner='*****@*****.**',
        reviewers=[],
        patchset=23,
        base_url='',
        description=u'bleh',
        messages=[])