def run_on_kubernetes(args):
    """Deploy the hub to Kubernetes, bundling a rethinkdb proxy sidecar when
    the database runs on localhost."""
    # Tiny resource requests in test mode so a test cluster is not starved.
    if args.test:
        rethink_cpu_request = hub_cpu_request = '10m'
        rethink_memory_request = hub_memory_request = '200Mi'
    else:
        hub_cpu_request = '300m'
        hub_memory_request = '1Gi'
        rethink_cpu_request = '300m'
        rethink_memory_request = '1Gi'
    util.ensure_secret_exists('sendgrid-api-key', 'sendgrid')
    util.ensure_secret_exists('zendesk-api-key', 'zendesk')
    args.local = False  # so tag is for gcloud
    if args.replicas is None:
        args.replicas = util.get_desired_replicas(NAME, 2)
    tag = util.get_tag(args, NAME, build)
    opts = {
        'image_hub': tag,
        'replicas': args.replicas,
        'pull_policy': util.pull_policy(args),
        'min_read_seconds': args.gentle,
        'smc_db_hosts': args.database_nodes,
        'smc_db_pool': args.database_pool_size,
        'smc_db_concurrent_warn': args.database_concurrent_warn,
        'hub_cpu_request': hub_cpu_request,
        'hub_memory_request': hub_memory_request,
        'rethink_cpu_request': rethink_cpu_request,
        'rethink_memory_request': rethink_memory_request,
    }
    if args.database_nodes == 'localhost':
        # Local database: also deploy a rethinkdb proxy next to the hub.
        from argparse import Namespace
        ns = Namespace(tag=args.rethinkdb_proxy_tag, local=False)
        opts['image_rethinkdb_proxy'] = util.get_tag(ns, 'rethinkdb-proxy', build)
        filename = 'smc-hub-rethinkdb-proxy.template.yaml'
    else:
        filename = '{name}.template.yaml'.format(name=NAME)
    template = open(join('conf', filename)).read()
    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        tmp.write(template.format(**opts))
        tmp.flush()
        util.update_deployment(tmp.name)
    # Expose the deployment as a service the first time it is created.
    if NAME not in util.get_services():
        util.run(['kubectl', 'expose', 'deployment', NAME])
def run_on_kubernetes(args):
    """Deploy the hub, with an optional rethinkdb proxy sidecar for a
    localhost database."""
    if args.test:
        # Minimal requests for a test namespace.
        rethink_cpu_request = hub_cpu_request = '10m'
        rethink_memory_request = hub_memory_request = '200Mi'
    else:
        hub_cpu_request = '500m'
        hub_memory_request = '1Gi'
        rethink_cpu_request = '500m'
        rethink_memory_request = '2Gi'
    util.ensure_secret_exists('sendgrid-api-key', 'sendgrid')
    util.ensure_secret_exists('zendesk-api-key', 'zendesk')
    args.local = False  # so tag is for gcloud
    if args.replicas is None:
        args.replicas = util.get_desired_replicas(NAME, 2)
    tag = util.get_tag(args, NAME, build)
    opts = dict(image_hub=tag,
                replicas=args.replicas,
                pull_policy=util.pull_policy(args),
                min_read_seconds=args.gentle,
                smc_db_hosts=args.database_nodes,
                smc_db_pool=args.database_pool_size,
                smc_db_concurrent_warn=args.database_concurrent_warn,
                hub_cpu_request=hub_cpu_request,
                hub_memory_request=hub_memory_request,
                rethink_cpu_request=rethink_cpu_request,
                rethink_memory_request=rethink_memory_request)
    if args.database_nodes == 'localhost':
        # Localhost database means we deploy the rethinkdb proxy too.
        from argparse import Namespace
        ns = Namespace(tag=args.rethinkdb_proxy_tag, local=False)
        opts['image_rethinkdb_proxy'] = util.get_tag(ns, 'rethinkdb-proxy', build)
        filename = 'smc-hub-rethinkdb-proxy.template.yaml'
    else:
        filename = '{name}.template.yaml'.format(name=NAME)
    template = open(join('conf', filename)).read()
    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        rendered = template.format(**opts)
        tmp.write(rendered)
        tmp.flush()
        util.update_deployment(tmp.name)
    if NAME not in util.get_services():
        util.run(['kubectl', 'expose', 'deployment', NAME])
def run_on_kubernetes(args):
    """Deploy one storage-projects deployment (with its persistent disk) per
    node number, creating the service and secrets on first run.

    Fix: removed the unused local ``deployment_name`` computed inside the
    loop — it was assigned and never read.
    """
    create_gcloud_secret()
    context = util.get_cluster_prefix()
    namespace = util.get_current_namespace()
    if len(args.number) == 0:
        # Figure out the nodes based on the names of persistent disks, or just node 0 if none.
        args.number = range(max(1, len(get_persistent_disks(context, namespace))))
    if 'storage-projects' not in util.get_services():
        util.run(['kubectl', 'create', '-f', 'conf/service.yaml'])
    args.local = False  # so tag is for gcloud
    tag = util.get_tag(args, NAME, build)
    if not args.tag:
        tag = tag[:tag.rfind('-')]  # get rid of the final -[service] part of the tag.
    template = open(join('conf', '{name}.template.yaml'.format(name=NAME))).read()
    ensure_ssh()
    for number in args.number:
        ensure_persistent_disk_exists(context, namespace, number, args.size,
                                      args.type)
        with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
            tmp.write(template.format(
                image=tag,
                number=number,
                gcloud_bucket=gcloud_bucket(namespace=namespace),
                pd_name=pd_name(context=context, namespace=namespace,
                                number=number),
                health_delay=args.health_delay,
                pull_policy=util.pull_policy(args)))
            tmp.flush()
            util.update_deployment(tmp.name)
def build_docker(args):
    """Build the Docker image, appending the short commit hash to the tag,
    then push to gcloud unless building locally."""
    if args.commit:
        # Suffix the tag with the first 6 chars of the commit hash.
        args.tag += ('-' if args.tag else '') + args.commit[:6]
    image_tag = util.get_tag(args, NAME)
    build(image_tag, args.rebuild, args.upgrade, args.commit)
    if not args.local:
        util.gcloud_docker_push(image_tag)
def run_on_kubernetes(args):
    """Deploy one rethinkdb node per number, each backed by a persistent disk."""
    # Requests are small in test mode so a test cluster is not starved.
    if args.test:
        cpu_request = '10m'
        memory_request = '200Mi'
    else:
        cpu_request = '500m'
        memory_request = '2Gi'
    context = util.get_cluster_prefix()
    namespace = util.get_current_namespace()
    if len(args.number) == 0:
        # Figure out the nodes based on the names of persistent disks, or just node 0 if none.
        args.number = range(max(1, len(get_persistent_disks(context, namespace))))
    ensure_services_exist()
    util.ensure_secret_exists('rethinkdb-password', 'rethinkdb')
    args.local = False  # so tag is for gcloud
    tag = util.get_tag(args, NAME, build)
    template = open(join('conf', '{name}.template.yaml'.format(name=NAME))).read()
    for number in args.number:
        ensure_persistent_disk_exists(context, namespace, number, args.size,
                                      args.type)
        with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
            tmp.write(template.format(
                image=tag,
                number=number,
                pd_name=pd_name(context=context, namespace=namespace,
                                number=number),
                health_delay=args.health_delay,
                cpu_request=cpu_request,
                memory_request=memory_request,
                pull_policy=util.pull_policy(args)))
            tmp.flush()
            util.update_deployment(tmp.name)
def run_on_kubernetes(args):
    """Deploy one deployment (with persistent disk) per node number."""
    context = util.get_kube_context()
    namespace = util.get_current_namespace()
    if len(args.number) == 0:
        # Figure out the nodes based on the names of persistent disks, or just node 0 if none.
        args.number = range(max(1, len(get_persistent_disks(context, namespace))))
    ensure_services_exist()
    util.ensure_secret_exists('rethinkdb-password', 'rethinkdb')
    args.local = False  # so tag is for gcloud
    tag = util.get_tag(args, NAME, build)
    template = open(join('conf', '{name}.template.yaml'.format(name=NAME))).read()
    for number in args.number:
        ensure_persistent_disk_exists(context, namespace, number, args.size,
                                      args.type)
        with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
            rendered = template.format(
                image=tag,
                number=number,
                pd_name=pd_name(context=context, namespace=namespace,
                                number=number),
                health_delay=args.health_delay,
                pull_policy=util.pull_policy(args))
            tmp.write(rendered)
            tmp.flush()
            util.update_deployment(tmp.name)
def run_on_kubernetes(args):
    """Deploy the project deployment, embedding a single-line YAML resources
    mapping in the template."""
    validate_project_ids(args)
    args.local = False  # so tag is for gcloud
    tag = util.get_tag(args, NAME, build)
    template = open(join('conf', '{name}.template.yaml'.format(name=NAME))).read()
    resources = {
        'requests': {'memory': "40Mi", 'cpu': '5m'},
        'limits': {'memory': "1000Mi", 'cpu': "1000m"},
    }
    # Fold the YAML dump into inline-flow form: newlines become commas.
    resources = '{' + yaml.dump(resources).replace('\n', ',')[:-1] + '}'
    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        tmp.write(template.format(
            image=tag,
            project_id=args.project_id,
            namespace=util.get_current_namespace(),
            storage_server=args.storage_server,
            disk_size=args.disk_size,
            resources=resources,
            preemptible='true' if args.preemptible else 'false',
            pull_policy=util.pull_policy(args)))
        tmp.flush()
        util.update_deployment(tmp.name)
def help(tree: Tree) -> Any:
    """Recursively walk `tree`, rewriting comparison nodes with `f`; other
    leaves are returned unchanged."""
    if type(tree) is not list:
        return tree
    if type(tree[0]) is str and get_tag(tree[0]) in ["<=", ">=", "<", ">"]:
        # A comparison node — hand the whole subtree to the rewriter.
        return f(tree, m)
    return [help(child) for child in tree]
def help(t: Tree, stock: List[Tree]):
    """Flatten nested and/or/not connectives, appending the non-connective
    subtrees (atoms) to `stock` in order."""
    if type(t) is list:
        if get_tag(t[0]) in ["and", "or", "not"]:
            for child in t[1:]:
                help(child, stock)
            return
    # Leaf or non-connective node: keep it as an atom.
    stock.append(t)
def help(t):
    """Drop ["mod", x, k] wrappers whose modulus k equals the global m,
    recursing into all other list nodes."""
    if type(t) is not list:
        return t
    if get_tag(t[0]) == "mod":
        assert len(t) == 3
        if int(t[2]) == m:
            # mod by m is a no-op in this representation — unwrap it.
            return t[1]
    return [help(child) for child in t]
def run_on_kubernetes(args):
    """Render daemon.yaml for the current namespace and update the daemonset.

    Fix: removed the unused local ``context`` and reused the already-computed
    ``namespace`` instead of calling util.get_current_namespace() twice.
    """
    namespace = util.get_current_namespace()
    tag = util.get_tag(args, NAME, build)
    template = open('daemon.yaml').read()
    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        tmp.write(template.format(image=tag,
                                  namespace=namespace,
                                  pull_policy=util.pull_policy(args)))
        tmp.flush()
        util.update_daemonset(tmp.name)
def build_docker(args):
    """Build the Docker image (optionally rebuilding the base image first),
    then either smoke-test it locally or push it to gcloud."""
    if args.commit:
        # Append the short commit hash to the requested tag.
        args.tag += ('-' if args.tag else '') + args.commit[:6]
    image_tag = util.get_tag(args, NAME)
    if args.rebuild_all:
        build_base(True)
    build(image_tag, args.rebuild, args.commit)
    if args.local:
        test_mesg(image_tag)
    else:
        util.gcloud_docker_push(image_tag)
def run_on_kubernetes(args):
    """Render storage-daemon.yaml and update the daemonset.

    Fix: removed the unused local ``context`` and reused the stored
    ``namespace`` rather than calling util.get_current_namespace() twice.
    """
    namespace = util.get_current_namespace()
    args.local = False  # so tag is for gcloud
    tag = util.get_tag(args, NAME, build)
    template = open('storage-daemon.yaml').read()
    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        tmp.write(template.format(image=tag,
                                  namespace=namespace,
                                  pull_policy=util.pull_policy(args)))
        tmp.flush()
        util.update_daemonset(tmp.name)
def run_on_kubernetes(args):
    """Deploy this service's template with the desired replica count, exposing
    it the first time it is created."""
    args.local = False  # so tag is for gcloud
    if args.replicas is None:
        args.replicas = util.get_desired_replicas(NAME, 2)
    tag = util.get_tag(args, NAME, build)
    template = open(join('conf', '{name}.template.yaml'.format(name=NAME))).read()
    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        rendered = template.format(image=tag,
                                   replicas=args.replicas,
                                   pull_policy=util.pull_policy(args))
        tmp.write(rendered)
        tmp.flush()
        util.update_deployment(tmp.name)
    if NAME not in util.get_services():
        util.run(['kubectl', 'expose', 'deployment', NAME])
def run_on_kubernetes(args):
    """Render storage-daemon.yaml and update the daemonset, first ensuring the
    rethinkdb secret exists.

    Fix: removed the unused local ``context`` and reused the stored
    ``namespace`` instead of recomputing it in the format call.
    """
    namespace = util.get_current_namespace()
    args.local = False  # so tag is for gcloud
    tag = util.get_tag(args, NAME, build)
    # ensure there is a rethinkdb secret, even if blank, so that daemon will start with reduced functionality
    util.ensure_secret_exists('rethinkdb-password', 'rethinkdb')
    template = open('storage-daemon.yaml').read()
    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        tmp.write(template.format(image=tag,
                                  namespace=namespace,
                                  pull_policy=util.pull_policy(args)))
        tmp.flush()
        util.update_daemonset(tmp.name)
def run_on_kubernetes(args):
    """Deploy this service after creating its kubectl secret and labelling
    preemptible nodes."""
    create_kubectl_secret()
    label_preemptible_nodes()
    args.local = False  # so tag is for gcloud
    tag = util.get_tag(args, NAME, build)
    template = open(join('conf', '{name}.template.yaml'.format(name=NAME))).read()
    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        tmp.write(template.format(image=tag,
                                  cluster_prefix=util.get_cluster_prefix(),
                                  node_selector=node_selector(),
                                  pull_policy=util.pull_policy(args)))
        tmp.flush()
        util.update_deployment(tmp.name)
def run_on_kubernetes(args):
    """Render storage-daemon.yaml and update the daemonset, ensuring the
    rethinkdb secret exists first.

    Fix: removed the unused local ``context`` and reused the stored
    ``namespace`` instead of recomputing it.
    """
    namespace = util.get_current_namespace()
    tag = util.get_tag(args, NAME, build)
    # ensure there is a rethinkdb secret, even if blank, so that daemon will start with reduced functionality
    util.ensure_secret_exists('rethinkdb-password', 'rethinkdb')
    template = open('storage-daemon.yaml').read()
    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        tmp.write(template.format(image=tag,
                                  namespace=namespace,
                                  pull_policy=util.pull_policy(args)))
        tmp.flush()
        util.update_daemonset(tmp.name)
def run_on_kubernetes(args):
    """Render the deployment template with the chosen replica count, update
    the deployment, and expose it if no service exists yet."""
    args.local = False  # so tag is for gcloud
    if args.replicas is None:
        args.replicas = util.get_desired_replicas(NAME, 2)
    tag = util.get_tag(args, NAME, build)
    template = open(join('conf', '{name}.template.yaml'.format(name=NAME))).read()
    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        tmp.write(template.format(image=tag,
                                  replicas=args.replicas,
                                  pull_policy=util.pull_policy(args)))
        tmp.flush()
        util.update_deployment(tmp.name)
    if NAME not in util.get_services():
        util.run(['kubectl', 'expose', 'deployment', NAME])
def run_on_kubernetes(args):
    """Deploy the project deployment for the validated project ids.

    Fix: removed the unused local ``context`` (util.get_cluster_prefix() was
    called but its result never used).
    """
    validate_project_ids(args)
    namespace = util.get_current_namespace()
    args.local = False  # so tag is for gcloud
    tag = util.get_tag(args, NAME, build)
    template = open(join('conf', '{name}.template.yaml'.format(name=NAME))).read()
    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        tmp.write(template.format(image=tag,
                                  project_id=args.project_id,
                                  namespace=namespace,
                                  storage_server=args.storage_server,
                                  disk_size=args.disk_size,
                                  pull_policy=util.pull_policy(args)))
        tmp.flush()
        util.update_deployment(tmp.name)
def run_on_kubernetes(args):
    """Deploy the hub stack (hub, proxy, rethinkdb proxy) from hub.template.yaml."""
    # Minimal requests when testing (flag or 'test' namespace).
    if args.test or util.get_current_namespace() == 'test':
        rethink_cpu_request = hub_cpu_request = proxy_cpu_request = '10m'
        rethink_memory_request = hub_memory_request = proxy_memory_request = '200Mi'
    else:
        hub_cpu_request = '500m'
        hub_memory_request = '1Gi'
        proxy_cpu_request = '200m'
        proxy_memory_request = '500Mi'
        rethink_cpu_request = '500m'
        rethink_memory_request = '2Gi'
    util.ensure_secret_exists('sendgrid-api-key', 'sendgrid')
    util.ensure_secret_exists('zendesk-api-key', 'zendesk')
    if args.replicas is None:
        args.replicas = util.get_desired_replicas(NAME, 2)
    opts = {
        'replicas': args.replicas,
        'pull_policy': util.pull_policy(args),
        'min_read_seconds': args.gentle,
        'smc_db_pool': args.database_pool_size,
        'smc_db_concurrent_warn': args.database_concurrent_warn,
        'hub_cpu_request': hub_cpu_request,
        'hub_memory_request': hub_memory_request,
        'proxy_cpu_request': proxy_cpu_request,
        'proxy_memory_request': proxy_memory_request,
        'rethink_cpu_request': rethink_cpu_request,
        'rethink_memory_request': rethink_memory_request,
    }
    # One image_<name> template variable per image in this stack.
    for image in IMAGES:
        opts['image_{image}'.format(image=image)] = get_tag(args, image)
    from argparse import Namespace
    ns = Namespace(tag=args.rethinkdb_proxy_tag, local=False)
    opts['image_rethinkdb_proxy'] = util.get_tag(ns, 'rethinkdb-proxy')
    template = open(join('conf', 'hub.template.yaml')).read()
    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        tmp.write(template.format(**opts))
        tmp.flush()
        util.update_deployment(tmp.name)
    if NAME not in util.get_services():
        util.run(['kubectl', 'expose', 'deployment', NAME])
def run_on_kubernetes(args):
    """Deploy this SSL-backed service and (re)expose it."""
    ensure_ssl()
    if args.replicas is None:
        args.replicas = util.get_desired_replicas(NAME, 2)
    args.local = False  # so tag is for gcloud
    tag = util.get_tag(args, NAME, build)
    print("tag='{tag}', replicas='{replicas}'".format(tag=tag,
                                                      replicas=args.replicas))
    template = open(join('conf', '{name}.template.yaml'.format(name=NAME))).read()
    namespace = util.get_current_namespace()
    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        tmp.write(template.format(image=tag,
                                  replicas=args.replicas,
                                  pull_policy=util.pull_policy(args),
                                  namespace=namespace))
        tmp.flush()
        util.update_deployment(tmp.name)
    expose()
def run_on_kubernetes(args):
    """Deploy the project deployment for the validated project ids.

    Fix: removed the unused local ``context`` (the util.get_cluster_prefix()
    result was never used).
    """
    validate_project_ids(args)
    namespace = util.get_current_namespace()
    args.local = False  # so tag is for gcloud
    tag = util.get_tag(args, NAME, build)
    template = open(join('conf', '{name}.template.yaml'.format(name=NAME))).read()
    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        tmp.write(template.format(image=tag,
                                  project_id=args.project_id,
                                  namespace=namespace,
                                  storage_server=args.storage_server,
                                  disk_size=args.disk_size,
                                  pull_policy=util.pull_policy(args)))
        tmp.flush()
        util.update_deployment(tmp.name)
def run_on_kubernetes(args):
    """Deploy this SSL-backed service, print the chosen tag/replicas, then
    expose it."""
    ensure_ssl()
    if args.replicas is None:
        args.replicas = util.get_desired_replicas(NAME, 2)
    args.local = False  # so tag is for gcloud
    tag = util.get_tag(args, NAME, build)
    print("tag='{tag}', replicas='{replicas}'".format(tag=tag,
                                                      replicas=args.replicas))
    template = open(join('conf', '{name}.template.yaml'.format(name=NAME))).read()
    namespace = util.get_current_namespace()
    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        rendered = template.format(image=tag,
                                   replicas=args.replicas,
                                   pull_policy=util.pull_policy(args),
                                   namespace=namespace)
        tmp.write(rendered)
        tmp.flush()
        util.update_deployment(tmp.name)
    expose()
def run_on_kubernetes(args):
    """Deploy the project deployment, passing resource limits as a
    single-line YAML mapping."""
    validate_project_ids(args)
    args.local = False  # so tag is for gcloud
    tag = util.get_tag(args, NAME, build)
    template = open(join('conf', '{name}.template.yaml'.format(name=NAME))).read()
    resources = {
        'requests': {'memory': "40Mi", 'cpu': '5m'},
        'limits': {'memory': "1000Mi", 'cpu': "1000m"},
    }
    # Collapse the YAML dump into inline-flow form for the template.
    resources = '{' + yaml.dump(resources).replace('\n', ',')[:-1] + '}'
    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        tmp.write(template.format(
            image=tag,
            project_id=args.project_id,
            namespace=util.get_current_namespace(),
            storage_server=args.storage_server,
            disk_size=args.disk_size,
            resources=resources,
            preemptible='true' if args.preemptible else 'false',
            pull_policy=util.pull_policy(args)))
        tmp.flush()
        util.update_deployment(tmp.name)
def run_on_kubernetes(args):
    """Deploy this service, choosing the smc-project default image either
    from an explicit tag or the most recent gcloud image."""
    create_kubectl_secret()
    args.local = False  # so tag is for gcloud
    tag = util.get_tag(args, NAME, build)
    template = open(join('conf', '{name}.template.yaml'.format(name=NAME))).read()
    if args.project_tag:
        default_image = util.gcloud_docker_repo('smc-project:' + args.project_tag)
    else:
        default_image = util.gcloud_most_recent_image('smc-project')
    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        tmp.write(template.format(image=tag,
                                  namespace=util.get_current_namespace(),
                                  cluster_prefix=util.get_cluster_prefix(),
                                  default_image=default_image,
                                  node_selector=node_selector(),
                                  pull_policy=util.pull_policy(args)))
        tmp.flush()
        util.update_deployment(tmp.name)
def process_ineq(t: Tree, m: int, f: Callable[[Dict[str, int], int, int], Any]) -> Any:
    """Normalize the inequality `t` into (coefs, const) form and apply `f`.

    Every comparison is rewritten as sum(coefs[k]*k) <= const; strict
    comparisons are absorbed by decrementing the constant (valid over the
    integers). Fixes: removed a stray ``pass``, dead commented-out debug
    prints, and collapsed four near-identical branches into one table.
    """
    l_coefs1, l_const1 = get_coefs_fraction(t[1], {})
    r_coefs1, r_const1 = get_coefs_fraction(t[2], {})
    # NOTE(review): only numerators are kept — presumably the fractions are
    # integral at this point; confirm against get_coefs_fraction's contract.
    l_coefs = {k: v.numerator for k, v in l_coefs1.items()}
    r_coefs = {k: v.numerator for k, v in r_coefs1.items()}
    l_const = l_const1.numerator
    r_const = r_const1.numerator
    vars = set(list(l_coefs.keys()) + list(r_coefs.keys()))
    tag = get_tag(t[0])
    if tag in ("<=", "<"):
        pos, neg = l_coefs, r_coefs
        const = r_const - l_const
    elif tag in (">=", ">"):
        pos, neg = r_coefs, l_coefs
        const = l_const - r_const
    else:
        assert False
    if tag in ("<", ">"):
        # Strict over the integers: a < b  <=>  a <= b - 1.
        const -= 1
    coefs: Dict[str, int] = {k: pos.get(k, 0) - neg.get(k, 0) for k in vars}
    return f(coefs, const, m)
def look_into_floor(to_int: Tree, vars: Dict[str, Tuple[int, int]],
                    lets: Dict[str, Any]) -> Tuple[Tree, List[int], int]:
    """Analyze a to_int/floor node: return an integer-coefficient expression,
    the list of integer values the floor may take (from interval bounds on the
    variables), and the cleared denominator."""

    def get_bounds(coefs: Dict[str, Fraction],
                   const: Fraction) -> Tuple[Fraction, Fraction]:
        # Interval arithmetic using the per-variable (lo, hi) bounds in `vars`.
        val_min = Fraction(0)
        val_max = Fraction(0)
        assert set(vars.keys()) >= set(coefs.keys())
        for k in coefs.keys():
            if coefs[k] >= 0:
                val_min += coefs[k] * vars[k][0]
                val_max += coefs[k] * vars[k][1]
            else:
                val_min += coefs[k] * vars[k][1]
                val_max += coefs[k] * vars[k][0]
        val_min += const
        val_max += const
        return val_min, val_max

    assert get_tag(to_int[0]) in ["to_int", "floor"]
    content_to_int = to_int[1]
    coefs, const = get_coefs_fraction(content_to_int, lets)
    assert set(coefs.keys()) <= set(vars.keys())  # TODO
    content_min_q, content_max_q = get_bounds(coefs, const)
    content_min = int(math.floor(content_min_q))
    content_max = int(math.floor(content_max_q))
    values_to_int = list(range(content_min, content_max + 1))
    # Clear denominators so the returned expression has integer coefficients.
    den = lcm_many([q.denominator for q in coefs.values()] +
                   [const.denominator])
    num_coefs = {k: int(v * den) for k, v in coefs.items()}
    num_const = int(const * den)
    expression = ["+"] + [["*", k, convert_int2list(v)]
                          for k, v in num_coefs.items()] + [num_const]  # type: ignore
    return expression, values_to_int, den
def run_on_kubernetes(args):
    """Deploy one storage-projects deployment per node number, creating the
    service, secret, and persistent disks as needed.

    Fix: removed the unused local ``deployment_name`` assigned inside the
    loop and never read.
    """
    create_gcloud_secret()
    context = util.get_cluster_prefix()
    namespace = util.get_current_namespace()
    if len(args.number) == 0:
        # Figure out the nodes based on the names of persistent disks, or just node 0 if none.
        args.number = range(max(1, len(get_persistent_disks(context, namespace))))
    if 'storage-projects' not in util.get_services():
        util.run(['kubectl', 'create', '-f', 'conf/service.yaml'])
    args.local = False  # so tag is for gcloud
    tag = util.get_tag(args, NAME, build)
    if not args.tag:
        tag = tag[:tag.rfind('-')]  # get rid of the final -[service] part of the tag.
    template = open(join('conf', '{name}.template.yaml'.format(name=NAME))).read()
    ensure_ssh()
    for number in args.number:
        ensure_persistent_disk_exists(context, namespace, number, args.size,
                                      args.type)
        with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
            tmp.write(template.format(
                image=tag,
                number=number,
                gcloud_bucket=gcloud_bucket(namespace=namespace),
                pd_name=pd_name(context=context, namespace=namespace,
                                number=number),
                health_delay=args.health_delay,
                pull_policy=util.pull_policy(args)))
            tmp.flush()
            util.update_deployment(tmp.name)
def build_docker(args):
    """Build this service's Docker image and push it to gcloud unless the
    build is local-only."""
    image_tag = util.get_tag(args, NAME)
    build(image_tag, args.rebuild)
    if not args.local:
        util.gcloud_docker_push(image_tag)
def help(tree: Any, def2types: Dict[str, str]) -> Tuple[Any, str]:
    """Translate an integer-arithmetic tree into bitvector form.

    Returns (term, type) where type is one of "int", "unsigned", "bool" or
    "dummy" (presumably "don't care"); arithmetic is mapped to bv* operators
    via the global `dic`, with widths taken from the globals `m`/`bitsize`.
    """
    if type(tree) is int:
        if tree < 0:
            # Encode a negative literal as the negation of its absolute value.
            sub, subtype = help(-tree, def2types)
            res_term, res_type = ["bvneg", sub], "int"
        else:
            res_term, res_type = ["_", f"bv{tree % m}", bitsize], "int"
    elif type(tree) is str:
        tag = get_tag(tree)
        assert tag != "dummy"
        if is_var(tag):
            # Variable type comes from var2type only in "depends" mode.
            undectag = undecorate(tag)[0]
            typ = var2type[undectag] if signed == "depends" else "dummy"
            res_term, res_type = tag, typ
        elif is_def_in_let(tag):
            res_term, res_type = tag, def2types[tag]
        elif tag in ["true", "false"]:
            res_term, res_type = tag, "bool"
        elif tag in ["*nondetint"]:
            res_term, res_type = tag, "int"
        elif tag in ["*nondetunsigned"]:
            res_term, res_type = tag, "unsigned"
        elif tag.lstrip("+-").isnumeric():
            res_term, res_type = help(int(tag), def2types)[0], "int"
        elif re_hex.match(tag.lstrip("+-")) is not None:
            res_term, res_type = help(int(tag, 16), def2types)[0], "int"
        else:
            assert False
    elif type(tree) is bool:
        res_term, res_type = tree, "bool"
    elif type(tree) is list:
        if type(tree[0]) is str:
            tag = get_tag(tree[0])
            if tag == "-":
                if len(tree) == 2:
                    # Unary minus.
                    sub, subtype = help(tree[1], def2types)
                    res_term, res_type = ["bvneg", sub], subtype
                elif len(tree) == 3:
                    # Binary minus: a - b  ->  bvadd(a, bvneg(b)).
                    sub1, subtype1 = help(tree[1], def2types)
                    sub2, subtype2 = help(tree[2], def2types)
                    res_term, res_type = ["bvadd", sub1, ["bvneg", sub2]
                                          ], align_int(subtype1, subtype2)
                else:
                    assert False
            elif tag in ["%", "mod", "div"]:
                assert len(tree) == 3
                left, lefttype = help(tree[1], def2types)
                right, righttype = help(tree[2], def2types)
                if right[1] == "bv0":
                    # Division/remainder by zero gets a fixed semantics here.
                    if dic[tag] == "bvdiv":
                        res_term, res_type = help(0, def2types)[0], lefttype
                    elif dic[tag] == "bvurem":
                        res_term, res_type = left, lefttype
                    else:
                        assert False
                else:
                    res_term, res_type = [dic[tag], left, right], align_int(
                        lefttype, righttype)
            elif tag in ["*", "+"]:
                subtrees_and_types = [help(i, def2types) for i in tree[1:]]
                subtrees = [i[0] for i in subtrees_and_types]
                subtypes = [i[1] for i in subtrees_and_types]
                subtype = functools.reduce(align_int, subtypes)
                res_term, res_type = [dic[tag]] + subtrees, subtype
            elif tag in ["and", "or"]:
                subtrees_and_types = [help(i, def2types) for i in tree[1:]]
                subtrees = [i[0] for i in subtrees_and_types]
                subtypes = [i[1] for i in subtrees_and_types]
                assert all(i == "bool" for i in subtypes)
                res_term, res_type = [tag] + subtrees, "bool"
            elif tag in ["ite"]:
                assert len(tree) == 4
                subtrees_and_types = [help(i, def2types) for i in tree[1:]]
                subtrees = [i[0] for i in subtrees_and_types]
                subtypes = [i[1] for i in subtrees_and_types]
                assert subtypes[0] == "bool"
                # ite is bool only if both branches are bool.
                if subtypes[1] == "bool" and subtypes[2] == "bool":
                    subtype = "bool"
                else:
                    subtype = align_int(subtypes[1], subtypes[2])
                res_term, res_type = [tag] + subtrees, subtype
            elif tag in ["not"]:
                assert len(tree) == 2
                left, lefttype = help(tree[1], def2types)
                assert lefttype == "bool"
                res_term, res_type = ["not", left], "bool"
            elif tag in ["=>"]:
                assert len(tree) == 3
                left, lefttype = help(tree[1], def2types)
                right, righttype = help(tree[2], def2types)
                assert lefttype == "bool" and righttype == "bool"
                res_term, res_type = ["=>", left, right], "bool"
            elif tag in ineq_signed:  # equivalent to tag in ineq_unsigned
                sub1, subtype1 = help(tree[1], def2types)
                sub2, subtype2 = help(tree[2], def2types)
                subtype = align_int(subtype1, subtype2)
                # Pick the signed or unsigned bv comparison per global mode.
                if signed == "signed":
                    applytype = "int"
                elif signed == "unsigned":
                    applytype = "unsigned"
                elif signed == "depends":
                    applytype = subtype
                else:
                    assert False
                if applytype == "int":
                    res_term, res_type = [ineq_signed[tag], sub1, sub2], "bool"
                elif applytype == "unsigned":
                    res_term, res_type = [ineq_unsigned[tag], sub1, sub2
                                          ], "bool"
                else:
                    assert False
            elif tag == "=":
                subtrees_and_types = [help(i, def2types) for i in tree[1:]]
                subtrees = [i[0] for i in subtrees_and_types]
                subtypes = [i[1] for i in subtrees_and_types]
                # Collapse int/unsigned to one bucket; all operands must agree.
                subtypes = [
                    "integer" for i in subtypes if i in ["int", "unsigned"]
                ]
                aligned = len(set(subtypes)) <= 1
                assert aligned
                res_term, res_type = ["="] + subtrees, "bool"
            elif tag == "let":
                assert len(tree) == 3
                defs = tree[1]
                content = tree[2]
                defs_replaced_both = [[v, help(w, def2types)] for v, w in defs]
                defs_replaced = [[v, w[0]] for v, w in defs_replaced_both]
                defs_replaced_type = [[v, w[1]] for v, w in defs_replaced_both]
                # Record the bound definitions' types for the body.
                def2types.update(dict(defs_replaced_type))
                content_replaced, subtype = help(content, def2types)
                res_term, res_type = [
                    "let", defs_replaced, content_replaced
                ], subtype
            else:
                raise Exception(f"Unexpected symbol {tag}")
        elif type(tree[0][0]) == str:
            tag = get_tag(tree[0][0])
            if tag == "_":
                tag = get_tag(tree[0][1])
                if tag == "divisible":
                    divisor = int(tree[0][2])
                    assert divisor > 0
                    left, lefttype = help(tree[1], def2types)
                    assert lefttype in ["int", "unsigned"]
                    if divisor >= m:
                        # Divisor at least the modulus: only 0 is divisible.
                        right, righttype = help(0, def2types)
                        res_term, res_type = ["=", left, right], "bool"
                    else:
                        right, righttype = help(tree[2], def2types)
                        assert righttype in ["int", "unsigned"]
                        res_term, res_type = [["_", "divisible", right],
                                              left], "bool"
                else:
                    assert False
            else:
                assert False
        else:
            assert False
    else:
        assert False
    return res_term, res_type
def help(tree: Any) -> Any:
    """Translate a bitvector tree back into modular-integer form, encoding
    signed comparisons via case splits on the sign boundary m // 2."""
    if type(tree) is list:
        tag = tree[0]
        if tag in ["bvadd", "bvdiv", "bvmul"]:
            arith = {"bvadd": "+", "bvdiv": "div", "bvmul": "*"}
            return cover_mod([arith[tag]] + [help(i) for i in tree[1:]])
        elif tag in ["bvsle", "bvslt", "bvsgt", "bvsge"]:
            tree1 = help(tree[1])
            tree2 = help(tree[2])

            def case_sle(left, right):
                # Same sign: plain <=; mixed signs: left is the negative one.
                cond = [
                    "or",
                    [
                        "and", ["<=", left, m // 2 - 1],
                        ["<=", right, m // 2 - 1]
                    ],
                    ["and", [">=", left, m // 2], [">=", right, m // 2]]
                ]
                t = ["<=", left, right]
                f = [">=", left, m // 2]
                return ["ite", cond, t, f]

            if tag == "bvsle":
                return case_sle(tree1, tree2)
            elif tag == "bvslt":
                return ["not", case_sle(tree2, tree1)]
            elif tag == "bvsgt":
                return ["not", case_sle(tree1, tree2)]
            elif tag == "bvsge":
                return case_sle(tree2, tree1)
            else:
                assert False
        elif tag in ["bvule", "bvult", "bvuge", "bvugt"]:
            left = help(tree[1])
            right = help(tree[2])

            def case_ule(left, right):
                return ["<=", left, right]

            if tag == "bvule":
                return case_ule(left, right)
            elif tag == "bvult":
                return ["not", case_ule(right, left)]
            elif tag == "bvuge":
                return case_ule(right, left)
            elif tag == "bvugt":
                return ["not", case_ule(left, right)]
            else:
                assert False
        elif tag in ["=", "=>", "and", "or", "not"]:
            return [tag] + [help(i) for i in tree[1:]]
        elif tag in ["bvneg"]:
            return cover_mod(["-", help(tree[1])])
        elif tag == "_":
            # ["_", "bvN", width]  ->  the literal N.
            return int(get_tag(tree[1])[2:])
        elif tag == "let":
            assert len(tree) == 3
            defs = tree[1]
            content = tree[2]
            defs_replaced = [[v, help(w)] for v, w in defs]
            content_replaced = help(content)
            return ["let", defs_replaced, content_replaced]
        elif tag == "bvurem":
            return ["mod", help(tree[1]), help(tree[2])]
        else:
            assert False
    elif type(tree) is str:
        tag = get_tag(tree)
        if tag in ["true", "false"]:
            return tag
        elif is_var(tag) or is_def_in_let(tag):
            return tag
        elif tag.isnumeric():
            return help(int(tag))
        assert False
    elif type(tree) is int:
        assert 0 <= tree and tree < m
        return tree
    elif type(tree) is bool:
        return "true" if tree else "false"
    else:
        assert False
def build_docker(args):
    """Build the image, then push one tagged image per service to gcloud
    unless the build is local-only."""
    image_tag = util.get_tag(args, NAME)
    build(image_tag, args.rebuild)
    if not args.local:
        for service in SERVICES:
            util.gcloud_docker_push(full_tag(image_tag, service))
def get_tag(args, image):
    """Return the gcloud tag for one of this service's component images."""
    return util.get_tag(args, '{}-{}'.format(NAME, image))