def test_is_set(self):
    """Every one of these type specs should parse as a set type."""
    set_specs = (
        "{bool}",
        "{unit}",
        "{(int, bool, float)}",
        "{ int -> bool }",
        "{{str: bool}}",
    )
    for spec in set_specs:
        self.assertTrue(PType.from_str(spec).is_set())
def test_quantify(self):
    """Quantifying a type should bind each free variable with a V-binder."""
    cases = (
        ("'a", "V'a.'a"),
        ("'a -> 'a", "V'a.'a -> 'a"),
        ("'a -> 'b", "V'a.V'b.'a -> 'b"),
        ("{'g : ('a, int)}", "V'a.V'g.{'g: ('a, int)}"),
    )
    for spec, expected in cases:
        self.assertEqual(str(PType.from_str(spec).quantify()), expected)
def _check_expr(self, s, expr_kind, typ, expected):
    """Typechecks the string C{s} as an C{expr_kind} expression.

    @param s: Python source text of the expression under test.
    @param expr_kind: fragment substituted into C{expr_template} to select
        the checking function to call.
    @param typ: type specification string that C{s} is checked against.
    @param expected: "pass", "fail", or the name of an exception class
        (resolved with C{eval}) that the check is expected to raise.
    """

    a = ast.parse(s).body[0].value
    f = expr_template % expr_kind

    if expected == "pass" or expected == "fail":
        t = PType.from_str(typ)
        if expected == "pass":
            self.assertEqual(
                True, call_function(f, a, t, {}),
                "%s should typecheck as %s but does not." % (s, t))
        elif expected == "fail":
            self.assertEqual(
                False, call_function(f, a, t, {}),
                "%s shouldn't typecheck as %s but does." % (s, t))
    # NOTE(review): `expected` is eval'd, so this trusts the contents of the
    # test specification files.
    elif issubclass(eval(expected), Exception):
        # if the expected value is an error, then make sure it
        # raises the right error.
        try:
            # PType.from_str may itself raise the expected error.
            t = PType.from_str(typ)
            call_function(f, a, t, {})
        except eval(expected):
            pass
        else:
            self.fail("Should have raised error %s, but does not. (%s)." %
                      (expected, s))
    else:
        raise TestFileFormatError("Expression tests can only be" + \
            " specified as passing, failing, or raising an error " + \
            " specified in errors.py, but this test was specified " + \
            " as expecting: " + expected)
def _check_expr(self, s, expr_kind, typ, expected):
    """Typechecks the string C{s} as an C{expr_kind} expression.

    @param s: Python source text of the expression under test.
    @param expr_kind: fragment substituted into C{expr_template} to select
        the checking function to call.
    @param typ: type specification string that C{s} is checked against.
    @param expected: "pass", "fail", or the name of an exception class
        (resolved with C{eval}) that the check is expected to raise.
    """

    a = ast.parse(s).body[0].value
    f = expr_template % expr_kind

    if expected == "pass" or expected == "fail":
        t = PType.from_str(typ)
        if expected == "pass":
            self.assertEqual(
                True, call_function(f, a, t, {}),
                "%s should typecheck as %s but does not." % (s,t))
        elif expected == "fail":
            self.assertEqual(
                False, call_function(f, a, t, {}),
                "%s shouldn't typecheck as %s but does." % (s, t))
    # NOTE(review): `expected` is eval'd, so this trusts the contents of the
    # test specification files.
    elif issubclass(eval(expected), Exception):
        # if the expected value is an error, then make sure it
        # raises the right error.
        try:
            # PType.from_str may itself raise the expected error.
            t = PType.from_str(typ)
            call_function(f, a, t, {})
        except eval(expected):
            pass
        else:
            self.fail("Should have raised error %s, but does not. (%s)." %
                      (expected, s))
    else:
        raise TestFileFormatError("Expression tests can only be" + \
            " specified as passing, failing, or raising an error " + \
            " specified in errors.py, but this test was specified " + \
            " as expecting: " + expected)
def test_is_tuple(self):
    """Every one of these type specs should parse as a tuple type."""
    tuple_specs = (
        "(float,bool,int)",
        "(bool,)",
        "(bool,float,int,bool)",
        "([int],[bool])",
        "({int:float},{float:int})",
    )
    for spec in tuple_specs:
        self.assertTrue(PType.from_str(spec).is_tuple())
def parse_type_dec(line, lineno, var_name, type_spec):
    """Constructs a L{ast_extensions.TypeDec} from the type declaration in the
    provided line. The name of the variable and the type specification are
    passed since they are stored when matching the typedec regex against the
    line and it would be wasteful to discard that information.

    @type line: str
    @param line: the line of source code containing the type declaration.
    @type lineno: int
    @param lineno: the index of this line in the original source code file.
    @type var_name: str
    @param var_name: the name of the identifier whose type is being declared.
    @type type_spec: str
    @param type_spec: the specification of the type which is being declared.
    @rtype: L{ast_extensions.TypeDec}
    @return: a L{ast_extensions.TypeDec} node for the declaration in the given
        line.
    """

    # The Name node is anchored at the column where the identifier appears.
    col = line.index(var_name)
    name_node = ast.Name(ctx=TypeStore(), id=var_name, lineno=lineno,
                         col_offset=col)
    # The TypeDec node itself is anchored at the "#:" type-comment marker.
    col_offset = line.index("#:")

    return TypeDec([name_node], PType.from_str(type_spec), lineno, col_offset)
def __init__(self, targets, t, line, col=None):
    """
    Create a `TypeDec` node with the supplied parameters.

    #### Parameters
    - `targets`: list of identifiers (as `ast.Name` objects) having their
      types declared.
    - `t`: the type being assigned, as a PType or string. If a string is
      provided, it is parsed into the appropriate PType.
    - `line`: the (int) line number of the declaration in the source code.
    - `col`: [optional] the (int) column number of the declaration in the
      source code. If not provided, then the column number will just be set
      as `None`.
    """

    self.targets = targets
    self.lineno = line
    if col is not None:
        self.col_offset = col

    # Accept either a type-spec string (parsed here) or an already-built
    # PType.
    if isinstance(t, str):
        self.t = PType.from_str(t)
        # BUG FIX: the message previously blamed TypeSpecParser.parse, but
        # the value being checked comes from PType.from_str.
        assert self.t.__class__ == PType, \
            ("Got a %s back from PType.from_str, not a PType" %
             cname(self.t.__class__))
    elif t.__class__ == PType:
        self.t = t
    else:
        assert False, ("t needs to be specified as str or PType, not " +
                       cname(t))

    # these are instance variables provided by AST nodes to allow traversal
    # / parsing of the nodes.
    self._fields = ("targets", "t")
    self._attributes = ("lineno", "col_offset")
def _check_Call_expr(call, t, env):
    """Application."""
    assert call.__class__ is ast.Call

    func = call.func
    args = call.args

    # All App rules have specific forms for keywords, starargs, and kwargs:
    # none of them may be present.
    if not call.keywords and not call.starargs and not call.kwargs:

        if not args:
            # (App1) assignment rule: nullary application.
            return check_expr(func, PType.arrow(unit_t, t), env)

        if func.__class__ is ast.Name:
            func_t = env_get(env, func.id)
            if len(args) == 1:
                # (App2) assignment rule: single argument.
                arg = args[0]
            else:
                # (App3) assignment rule: multiple arguments checked as one
                # tuple against the function's domain.
                arg = ast.Tuple(list(args), ast.Load())
            return check_expr(arg, func_t.dom, env) and func_t.ran == t

    # No assignment rule found.
    return False
def test_free_vars(self):
    """Free type variables should be collected from each type spec."""
    alpha = PType.from_str("'a")
    beta = PType.from_str("'b")
    gamma = PType.from_str("'g")

    cases = (
        ("'a", {alpha}),
        ("'a -> 'a", {alpha}),
        ("'a -> 'b", {alpha, beta}),
        ("{'g : ('a, int)}", {alpha, gamma}),
    )
    for spec, expected in cases:
        self.assertEqual(PType.from_str(spec).free_type_vars(), expected)
def _check_For_stmt(stmt, env):
    """For Loop."""
    assert stmt.__class__ is ast.For

    # The loop target's type must be inferable; the iterable must then check
    # as a list of that type, and both the body and the else-suite must
    # check.
    target_t = infer_expr(stmt.target, env)

    # (For) assignment rule. -- restricted by type inference
    return (target_t and
            check_expr(stmt.iter, PType.list(target_t), env) and
            check_stmt_list(stmt.body, env) and
            check_stmt_list(stmt.orelse, env))
def test_is_basetype(self):
    """Base types should report is_base() and equal their constructors."""
    true = self.assertTrue
    true( int_t.is_base() )
    true( float_t.is_base() )
    true( str_t.is_base() )
    true( unicode_t.is_base() )
    true( bool_t.is_base() )
    true( unit_t.is_base() )

    equal = self.assertEqual
    equal( PType.int(), int_t )
    equal( PType.float(), float_t )
    equal( PType.string(), str_t )
    equal( PType.unicode(), unicode_t )
    equal( PType.bool(), bool_t )
    equal( PType.unit(), unit_t )

    # BUG FIX: this was `all(equal(...) for ...)`. assertEqual returns None
    # (falsy), so all() short-circuited after the first item and the
    # remaining base types were never checked. An explicit loop runs every
    # assertion.
    for k, v in base_ts.iteritems():
        equal(PType.from_str(k), v)
def infer_Tuple_expr(tup, env):
    """
    Determine the type of AST `Tuple` expression under type environment
    `env`.

    `ast.Tuple`
      - `elts`: Python list of contained expr nodes
      - `ctx`: context of the expr (e.g., load, store)

    Returns the inferred PType, or None if no assignment rule applies.
    """

    assert tup.__class__ is ast.Tuple

    # FIX: infer each element exactly once (previously every element was
    # inferred twice: once for the all() check and again to build the list),
    # and compare against None with `is not None`.
    elt_ts = [infer_expr(e, env) for e in tup.elts]

    if all(t is not None for t in elt_ts):
        # (tup) assignment rule.
        return PType.tuple(elt_ts)
    else:
        # No assignment rule found.
        return None
def valid_int_slice(l, u, s, env): """ Determine if three AST expr nodes representing the parameters to a simple slice are valid integers (or Nones) under type environment `env`. `l`: AST expr node representing lower bound of slice. `u`: AST expr node representing upper bound of slice. `s`: AST expr node representing step of slice. `n`: (int) length of the collection being sliced. """ # These are imported here because we don't want to pollute the entire util # module with potential circular references. In theory, util.py functions # shouldn't need to refer to checking or inferring, but this is a function # that happens to be shared between typecheck.py and infer.py. from check import check_expr from ptype import PType int_t = PType.int() return ((l is None or check_expr(l, int_t, env)) and (u is None or check_expr(u, int_t, env)) and (s is None or node_is_None(s) or check_expr(s, int_t, env)))
def __init__(self, targets, t, line, col = None):
    """
    Create a `TypeDec` node with the supplied parameters.

    #### Parameters
    - `targets`: list of identifiers (as `ast.Name` objects) having their
      types declared.
    - `t`: the type being assigned, as a PType or string. If a string is
      provided, it is parsed into the appropriate PType.
    - `line`: the (int) line number of the declaration in the source code.
    - `col`: [optional] the (int) column number of the declaration in the
      source code. If not provided, then the column number will just be set
      as `None`.
    """

    self.targets = targets
    self.lineno = line
    if col is not None:
        self.col_offset = col

    if type(t) == str:
        self.t = PType.from_str(t)
        # NOTE(review): the message mentions TypeSpecParser.parse, but the
        # value actually comes from PType.from_str -- the message looks
        # stale; confirm before relying on it.
        assert self.t.__class__ == PType, \
            ("Got a %s back from TypeSpecParser.parse, not a PType" %
             cname(self.t.__class__))
    elif t.__class__ == PType:
        self.t = t
    else:
        assert False, ("t needs to be specified as str or PType, not " +
                       cname(t))

    # these are instance variables provided by AST nodes to allow traversal
    # / parsing of the nodes.
    self._fields = ("targets", "t")
    self._attributes = ("lineno", "col_offset")
def infer_List_expr(lst, env):
    """
    Determine the type of AST `List` expression under type environment
    `env`.

    `ast.List`
      - `elts`: Python list of contained expr nodes
      - `ctx`: context of the expr (e.g., load, store)

    Returns the inferred PType, or None if no assignment rule applies.
    """

    assert lst.__class__ is ast.List

    elts_list = lst.elts

    # FIX: an empty list literal previously crashed with IndexError on
    # elts_list[0]. There is no element to pin down the element type, so no
    # assignment rule applies.
    if not elts_list:
        return None

    # Infer the first element's type, then require every remaining element
    # to check against it.
    first_type = infer_expr(elts_list[0], env)

    if all(check.check_expr(e, first_type, env) for e in elts_list[1:]):
        # (lst) assignment rule.
        return PType.list(first_type)
    else:
        # No assignment rule found.
        return None
# FIXME: this is copied from unit_test_core, should be abstracted # away somewhere, but don't know the best way to deal with logging. with open(file_name, 'r') as f: text = f.read() untyped_ast = ast.parse(text) typedecs = parse_type_decs(file_name) typed_ast = TypeDecASTModule(untyped_ast, typedecs) if check_mod(typed_ast.tree): print "Typechecked correctly!" else: print "Did not typecheck." except IOError as e: print "File not found: %s" % e.filename elif opt.expr and opt.type and not opt.filename and not opt.infer_expr: e = ast.parse(opt.expr).body[0].value t = PType.from_str(opt.type) template = ("YES! -- %s typechecks as type %s" if check_expr(e, t, {}) else "NO! --- %s does not typecheck as type %s") print template % (opt.expr, t) elif opt.infer_expr and not opt.filename and not opt.expr and not opt.type: e = ast.parse(opt.infer_expr).body[0].value print "%s -- is the inferred type of %s" % (infer_expr(e, {}), opt.infer_expr) else: parser.print_help()
try: # FIXME: this is copied from unit_test_core, should be abstracted # away somewhere, but don't know the best way to deal with logging. with open(file_name, 'r') as f: text = f.read() untyped_ast = ast.parse(text) typedecs = parse_type_decs(file_name) typed_ast = TypeDecASTModule(untyped_ast, typedecs) if check_mod(typed_ast.tree): print "Typechecked correctly!" else: print "Did not typecheck." except IOError as e: print "File not found: %s" % e.filename elif opt.expr and opt.type and not opt.filename and not opt.infer_expr: e = ast.parse(opt.expr).body[0].value t = PType.from_str(opt.type) template = ("YES! -- %s typechecks as type %s" if check_expr(e, t, {}) else "NO! --- %s does not typecheck as type %s") print template % (opt.expr, t) elif opt.infer_expr and not opt.filename and not opt.expr and not opt.type: e = ast.parse(opt.infer_expr).body[0].value print "%s -- is the inferred type of %s" % (infer_expr(e, {}), opt.infer_expr) else: parser.print_help()
import ast import logging from util import cname, slice_range, node_is_int, valid_int_slice from errors import TypeUnspecifiedError from ptype import PType from settings import DEBUG_INFER # Need to use this form to resolve circular import. import check int_t = PType.int() float_t = PType.float() bool_t = PType.bool() str_t = PType.string() unicode_t = PType.unicode() unit_t = PType.unit() log = None def i_debug(s, cond=True): log.debug(s, DEBUG_INFER and cond) def call_function(fun_name, *args, **kwargs): return globals()[fun_name](*args, **kwargs) def env_get(env, var_id): """
def test_is_arrow(self):
    """Every one of these type specs should parse as a function (arrow)."""
    arrow_specs = (
        "int -> float",
        "unicode -> {int:float}",
        "unicode -> str -> int",
        "(unicode -> str) -> int",
    )
    for spec in arrow_specs:
        self.assertTrue(PType.from_str(spec).is_arrow())
def test_is_map(self):
    """Every one of these type specs should parse as a map (dict) type."""
    map_specs = (
        "{int:float}",
        "{float -> str:int}",
        "{int:unicode}",
        "{(int,int):float}",
    )
    for spec in map_specs:
        self.assertTrue(PType.from_str(spec).is_map())
def test_is_list(self):
    """Every one of these type specs should parse as a list type."""
    for spec in ("[int]", "[float]", "[{float:str}]"):
        self.assertTrue(PType.from_str(spec).is_list())
import sys import unittest from lepl import sexpr_to_tree # Include src in the Python search path sys.path.insert(0, '../src') from ptype import (PType, TypeSpecParser, better_sexpr_to_tree, Lst, Stt, Tup, Mpp, Arr) int_t = PType.int() float_t = PType.float() str_t = PType.string() unicode_t = PType.unicode() bool_t = PType.bool() unit_t = PType.unit() base_ts = {"int": int_t, "float": float_t, "str": str_t, "unicode": unicode_t, "bool": bool_t, "unit": unit_t} class PTypeTests(unittest.TestCase): def test_is_basetype(self): true = self.assertTrue true( int_t.is_base() ) true( float_t.is_base() ) true( str_t.is_base() ) true( unicode_t.is_base() ) true( bool_t.is_base() ) true( unit_t.is_base() )
def test_is_var(self):
    """Every one of these type specs should parse as a type variable."""
    for spec in ("'a", "'alpha", "'Yothere", "'hiB9"):
        self.assertTrue(PType.from_str(spec).is_var())
def infer_Subscript_expr(subs, env):
    """
    Determine the type of AST `Subscript` expression under type environment
    `env`.

    `ast.Subscript`
      - `value`: the collection being subscripted
      - `slice`: `ast.Index` or `ast.Slice`
        + `value`: expr used as index (if `ast.Index`)
        + `lower`: expr used as lower bound (if `ast.Slice`)
        + `upper`: expr used as upper bound (if `ast.Slice`)
        + `step`: expr used as step (if `ast.Slice`)

    We can only subscript tuples with numeric literals because the inference
    algorithm needs to actually know the values of the subscript parameters.

    Returns the inferred PType, or None if no assignment rule applies.
    """

    assert subs.__class__ is ast.Subscript

    col = subs.value
    col_t = infer_expr(col, env)

    is_index = subs.slice.__class__ is ast.Index
    is_slice = subs.slice.__class__ is ast.Slice
    assert is_index or is_slice

    # Store the attributes of the slice.
    if is_index:
        i = subs.slice.value
    else: # is_slice
        l = subs.slice.lower
        u = subs.slice.upper
        s = subs.slice.step

    if col_t is None:
        # If we can't assign a type to the collection, then we can't assign a
        # type to its subscript.
        return None

    # String subscripting. Both indexing and slicing a string yield the same
    # string type.
    elif col_t == str_t or col_t == unicode_t:
        if is_index and infer_expr(i, env) == int_t:
            # (sidx) assignment rule.
            return col_t
        elif is_slice and valid_int_slice(l, u, s, env):
            # (sslc) assignment rule.
            return col_t
        else:
            # No assignment rule found.
            return None

    # List subscripting: indexing yields the element type, slicing yields the
    # same list type.
    elif col_t.is_list():
        if is_index and check.check_expr(i, int_t, env):
            # (lidx) assignment rule.
            return col_t.elt
        elif is_slice and valid_int_slice(l, u, s, env):
            # (lslc) assignment rule.
            return col_t
        else:
            # No assignment rule found.
            return None

    # Tuple subscripting: the index must be an int literal (checked with
    # node_is_int) so we can pick out the component type statically; likewise
    # a slice must have statically-known bounds (slice_range).
    elif col_t.is_tuple():
        col_ts = col_t.elts
        n = len(col_ts)
        # Negative literal indices within -n..n-1 are allowed, matching
        # Python's runtime indexing.
        if is_index and node_is_int(i) and -n <= i.n < n:
            # (tidx) assignment rule.
            return col_ts[i.n]
        elif is_slice:
            rng = slice_range(l, u, s, len(col_ts))
            if rng is not None:
                # (tslc) assignment rule.
                return PType.tuple([col_ts[i] for i in rng])
            else:
                # No assignment rule found.
                return None
        else:
            # No assignment rule found.
            return None
    else:
        # No assignment rule found.
        return None