def test_call_generate_values():
    """A guest call bound as "seq" pushes 2, 3, 4 on top of an existing value."""
    seq_call = GuestCall(
        definition_environment=test_environment(),
        definition_block=SyntaxBlock([
            SyntaxExpression([
                SyntaxNumber(int32(2)),
                SyntaxNumber(int32(3)),
                SyntaxNumber(int32(4)),
            ])
        ]),
    )
    program = SyntaxBlock([SyntaxExpression([SyntaxIdentifier("seq")])])
    stack = MachineExpressionStack([MachineNumber(int32(1))])
    env = test_environment({"seq": seq_call})
    interpret(program, stack, env)
    expected = MachineExpressionStack([
        MachineNumber(int32(1)),
        MachineNumber(int32(2)),
        MachineNumber(int32(3)),
        MachineNumber(int32(4)),
    ])
    assert stack == expected
def call_test(binding, args, results):
    """Invoke *binding* by name on a stack seeded with *args*.

    Asserts the stack afterwards equals *results*.
    """
    program = SyntaxBlock(
        [SyntaxExpression([SyntaxIdentifier(binding.name)])]
    )
    stack = MachineExpressionStack(args)
    interpret(program, stack, test_environment())
    assert stack == MachineExpressionStack(results)
def main(args):
    # End-to-end saliency benchmark pipeline: build synthetic datasets, train
    # models, compute saliency maps and masks, then accuracy / precision /
    # recall / AUC metrics. Each stage feeds files consumed by the next, so
    # the call order matters.
    # Creating Datasets
    createDatasets(args, DatasetsTypes, ImpTimeSteps, ImpFeatures, StartImpTimeSteps, StartImpFeatures, Loc1, Loc2, FreezeType, isMoving, isPositional, DataGenerationTypes)
    #Train Models
    train_models(args, DatasetsTypes, DataGenerationTypes, models, device)
    #Decreasing batch size for captum
    args.batch_size = 10
    #Get Saliency maps
    interpret(args, DatasetsTypes, DataGenerationTypes, models, device)
    #create Masks
    createMasks(args, DatasetsTypes, DataGenerationTypes, models)
    #Get Masked Accuracy
    getMaskedAccuracy(args, DatasetsTypes, DataGenerationTypes, models, device)
    #Get precsion and recall
    getPrecisionRecall(args, DatasetsTypes, DataGenerationTypes, models)
    #Get AUC, AUR, AUP and AUPR
    getAccuracyMetrics(args, DatasetsTypes, DataGenerationTypes, models)
    #For Feature and time level precsion and recall
    getFeatureTimePrecisionRecall(args, DatasetsTypes, DataGenerationTypes, models)
    # Re-run the metric aggregation at feature- and time-level granularity.
    args.Feature_PrecisionRecall = True
    args.Time_PrecisionRecall = True
    getAccuracyMetrics(args, DatasetsTypes, DataGenerationTypes, models)
def main():
    """Read an equation image (path in argv[1]), classify exponent glyphs,
    assemble a Python expression string, and print its evaluation.

    Fixes: no longer shadows the builtin ``eval``; uses enumerate and a
    comprehension instead of index loops.
    """
    curr_lst, exp_lst = cv_imgs(sys.argv[1])
    #1 is exp, 0 != exp
    is_exp = expo(exp_lst)
    # String representation of every recognized glyph, in reading order.
    final_array = [get_rep(item) for item in curr_lst]
    final_str = ""
    flag = False  # True while an opened "**(" exponent group is unclosed
    print(final_array)
    print(is_exp)
    for index, exp_flag in enumerate(is_exp):
        if exp_flag == 0:
            if flag:
                final_str += ")" + final_array[index]  # close exponent group
            else:
                final_str += final_array[index]
            flag = False
        else:
            if flag:
                final_str += final_array[index]
            else:
                final_str += "**(" + final_array[index]
                if index == len(is_exp) - 1:
                    final_str += ")"  # exponent at the very end: close now
                else:
                    flag = True
    print('Your Equation is: ' + final_str)
    result = interpret(final_str)  # renamed from `eval` (shadowed a builtin)
    print(result)
def main() -> None:
    """Summarize every extracted conjecture file into out/extracted.json.

    For each file in conjectures/extracted, parse and interpret it, record
    quantifier/term statistics of its single conjecture, and skip (with a
    message) files that fail to parse or check.

    Fixes: files were previously opened via ``open(f).read()`` and the output
    handle was opened at function start — both are now context-managed.
    """
    p = 'conjectures/extracted'
    files = sorted(os.path.join(p, f) for f in os.listdir(p))
    descs = []
    for f in files:
        original_file = os.path.splitext(os.path.basename(f))[0]
        # File names look like "<base>-<conjecture-id>"; split on the last dash.
        base = "-".join(original_file.split("-")[:-1])
        conj = original_file.split("-")[-1]
        print(f)
        try:
            with open(f) as source:  # previously leaked the handle
                (sig, axioms, conjectures, models) = interpret(parse(source.read()))
            assert len(conjectures) == 1
            descs.append({
                'base': base,
                'conjecture': conj,
                'file': f,
                'quantifiers': count_quantifiers(conjectures[0]),
                'max_quantifier_depth': max_quantifier_depth(conjectures[0]),
                'existentials': count_existentials(conjectures[0]),
                'max_term_depth': max_term_depth(conjectures[0]),
                'golden_formula': str(conjectures[0]),
            })
        except (SyntaxError, SemanticError) as e:
            print("File ", f, "was not valid", str(e))
    with open("out/extracted.json", "w") as o:
        json.dump(descs, o, indent=1)
def test_interpret():
    """Interpreting three number literals pushes them onto the stack in order."""
    values = [1, 2, 3]
    program = SyntaxBlock([
        SyntaxExpression([SyntaxNumber(int32(v)) for v in values])
    ])
    stack = MachineExpressionStack([])
    interpret(program, stack, test_environment())
    assert stack == MachineExpressionStack(
        [MachineNumber(int32(v)) for v in values]
    )
def test_call_delegate_to_host_call():
    """A guest call whose body names a host call ("sqrt") delegates to it."""
    delegate = GuestCall(
        definition_environment=test_environment(),
        definition_block=SyntaxBlock(
            [SyntaxExpression([SyntaxIdentifier("sqrt")])]
        ),
    )
    program = SyntaxBlock([SyntaxExpression([SyntaxIdentifier("f")])])
    stack = MachineExpressionStack([MachineNumber(int32(16))])
    env = test_environment({"f": delegate})
    interpret(program, stack, env)
    assert stack == MachineExpressionStack([MachineNumber(int32(4))])
def obtain_input(loc, exp, tag):
    """Transcribe the audio file at *loc* with Google speech recognition.

    When tag != 1 the transcript is forwarded to interpret.interpret(hear, exp);
    when tag == 1 the transcript is returned to the caller instead.
    Unintelligible audio prints an IDLE banner; API failures play a
    "no internet" prompt.

    Fixes: removed the unused ``process_id`` local, the dead ``hear = ""``
    initializer and a leftover commented-out ``with open()`` stub.
    """
    r = sr.Recognizer()
    with sr.AudioFile(loc) as source:
        audio = r.record(source)
        try:
            hear = r.recognize_google(audio)
            print(hear)
            if tag != 1:
                interpret.interpret(hear, exp)
            if tag == 1:
                return hear
        except sr.UnknownValueError:
            # Speech could not be understood; treat as idle rather than an error.
            print("------------------------------------------IDLE----------------------------------")
        except sr.RequestError as e:
            print("Could not request results from API service: {0}".format(e))
            play_vr.play_vr("no_internet.wav")
def _test(instructions, initial_data, expected_output, expected_data, strict):
    """Run *instructions* through the interpreter and report mismatches.

    Prints a diff-style message for a wrong output and, in strict mode, for
    a wrong data tape. Returns True when the output matches and (if strict)
    the data tape matches too.

    Fix: ``_compare_data`` was evaluated twice in strict mode; it is now
    computed once.
    """
    output, data = interpret.interpret(instructions, initial_data)
    output_ok = output == expected_output
    if not output_ok:
        print('Incorrect output:')
        print(' Expected:', expected_output)
        print(' Got:', output)
    data_ok = _compare_data(expected_data, data)
    if strict and not data_ok:
        print('Incorrect data tape:')
        print(' Expected:', expected_data)
        print(' Got:', data)
    return output_ok and ((not strict) or data_ok)
def main_run(
    source_filename: str,
    environment: Optional[MachineEnvironment] = None,
) -> None:
    """Parse and interpret a source file, then invoke its "main" guest call.

    The top-level program must leave the stack empty and must bind "main"
    to a GuestCall; any values "main" itself leaves behind are printed.
    """
    with open(source_filename, "r") as source_file:
        text = source_file.read()
    program = parse(text)
    env = environment if environment else base_environment()
    top_stack = MachineExpressionStack([])
    interpret(program, top_stack, env)
    assert not top_stack, top_stack
    assert "main" in env, env.bindings
    entry = env["main"]
    assert isinstance(entry, GuestCall), entry
    main_stack = MachineExpressionStack([])
    interpret(
        entry.definition_block,
        main_stack,
        entry.definition_environment,
    )
    if main_stack.values:
        print(main_stack.values)
def interpret_file(self): order_count, city_numbers, order_info = interpret.interpret( self.import_directory) # To show data at a glance self.tb_order_info.setText( '총 발주 건수: ' + str(order_count) + '건' + '\n총 도시 갯수: ' + str(city_numbers) + '개 도시' + '\n도시별 발주건수: ') output = '' for i in range(len(order_info)): output += (f'{order_info[i][0]}: {order_info[i][1]}건\n') self.tb_order_info.append(output) # progress bar value changes slowly self.pb_status.setValue(0) self.completed = 0 while self.completed < 100: self.completed += 0.0001 self.pb_status.setValue(self.completed)
def op_load(c: AF_Continuation) -> None:
    """Pop a module name from the stack and interpret that file.

    Candidate paths are tried in order: the literal name, name + '.a4',
    and both variants under lib/. The first candidate that opens is used.

    Fixes: the search previously kept opening later candidates after a
    match (so the *last* existing path silently won, and every opened
    handle leaked); it now stops at the first match and closes the handle
    after interpretation.
    """
    filename = c.stack.pop().value
    f = None
    for file in [filename,
                 filename + '.a4',
                 'lib/' + filename,
                 'lib/' + filename + '.a4']:
        try:
            f = open(file)
            break  # first readable candidate wins
        except FileNotFoundError:
            pass
    if not f:
        print("No file or module called '%s' found." % filename)
    else:
        op_pcsave(c)
        try:
            c.execute(interpret(c, f, filename))
        finally:
            f.close()  # previously leaked
        op_pcreturn(c)
    if c.prompt:
        print(c.prompt, end='', flush=True)
def interpreter(language):
    """Serve the interpreter page for *language*.

    POST runs the submitted code and renders its output; GET renders the
    page pre-filled with the language's hello-world sample. Unknown
    languages redirect to the overview.
    """
    if request.method == "POST":
        # Run code
        for candidate in languages:
            if candidate.name == language:
                code = request.form["codearea"]
                output = interpret(candidate, request.form["codearea"])
                return render_template("interpreter.html",
                                       language=candidate,
                                       code=code,
                                       output=output)
    # Load page
    for candidate in languages:
        if candidate.name == language:
            return render_template("interpreter.html",
                                   language=candidate,
                                   code=candidate.generateHelloWorld(),
                                   output=[])
    return redirect("../../overview")
def main():
    """Translate the Brainfuck source file to C and dispatch on the CLI flag.

    Fixes: the source file is now context-managed (it previously stayed open
    on an interpreter error); flag tests use tuple membership.
    """
    with open(sourcecode_filename) as code_contents:
        returned_c_code = interpret.interpret(code_contents.read())
    # Polish the code by injecting the generated C code into it as well as the presets.
    polished_code = bftypes.c_code.format(
        code=returned_c_code,
        memory_size=settings.memory_size,
    )
    # Simple argument system. We don't need anything more.
    if arguments[0] in ("--compile", "-c"):
        compile(polished_code, arguments[1])
    elif arguments[0] in ("--save", "-s"):
        save(polished_code, arguments[1])
    elif arguments[0] in ("--print", "-p"):
        print(polished_code)
    else:
        helpmenu()
#!/usr/bin/python2
from sys import argv
from index import Index
from interpret import interpret
from token import Token
from token import build_tokens


def init_tokens(files):
    """Concatenate the token streams of every file in *files*, in order."""
    tokens = []
    for filename in files:
        tokens += build_tokens(filename)
    return tokens


def init_memory(size):
    """Build the data tape: *size* cells, each a fresh Index(0)."""
    # Comprehension replaces the old grow-in-a-while-loop construction.
    return [Index(0) for _ in range(size)]


if __name__ == '__main__':
    if len(argv) < 2:
        # Parenthesized so it behaves the same under Python 2 and 3.
        print('You need to provide a file to interpret.')
        exit(-1)
    tokens = init_tokens(argv[1:])
    memory = init_memory(30000)
    interpret(tokens, memory)
def do_repl(filename: str, handle: TextIO): global cont # Set Debug on or off initially. op_debug(cont) op_off(cont) #op_on(cont) # Checkpoint our initial setup. op_checkpoint(cont) print("ActorForth interpreter. ^C to exit.") print_words() while True: """ INTRO 1.4 : Continuously call the Interpreter until ^C is hit, the input file runs out of tokens to parse, or an exception is encountered. TODO: Likely probably want exceptions to just reset the stack/Continuation and resume. """ try: cont.execute(interpret(cont, handle, filename, prompt=cont.prompt)) """ INTRO 1.5 : If the last token in the input file is 'resume' then we re-establish stdin as our input file and continue at the repl with everything intact. This is a special hard-coded command. TODO: How to do this in a more forth-like manner? """ #if cont.stack.tos().value == "resume": if cont.symbol is not None and cont.symbol.s_id == "resume": handle = sys.stdin filename = "stdin" #cont.stack.pop() print_continuation_stats(cont) else: print("Clean?? exit! cont.symbol = %s." % cont.symbol) break except KeyboardInterrupt as x: print(" key interrupt.") break except Exception as x: """ INTRO 1.6 : Uncaught exceptions will interupt the interpreter, print status output, turn Debug on, reset the input to stdin and proceed again. INTRO 1.7 : Continue in interpret.py for INTRO stage 2. """ cont.log.error("REPL EXCEPTION TYPE %s : %s" % (type(x), x)) cont.log.error("TRACEBACK : %s" % traceback.format_exc()) print("REPL EXCEPTION TYPE %s : %s" % (type(x), x)) print("TRACEBACK : %s" % traceback.format_exc()) # Turn debug on automatically. op_debug(cont) op_on(cont) print_continuation_stats(cont) filename, handle = setup_stream_for_interpreter(force_stdio=True) print_continuation_stats(cont) print("\nend of line...") tos = cont.stack.tos() if tos != Stack.Empty: return tos.value else: return None
def test(test_dir):
    """Interpret script.txt inside *test_dir* and check it against answer.json.

    The JSON answer entries are converted to sets via list_to_set before
    comparison.
    """
    script_path = os.path.join(test_dir, 'script.txt')
    result = interpret(script_path)
    answer_path = os.path.join(test_dir, 'answer.json')
    with open(answer_path, 'r') as answer_file:
        raw_answer = json.load(answer_file)
    answer = [list_to_set(entry) for entry in raw_answer]
    assert result == answer
def main():
    """Read one line of code, display its AST, then print the interpreted result."""
    print("\nEnter code to parse:")
    source = input()
    tree = Parse.build_ast(Parse.prepare(source))
    print("\nAbstract syntax tree:", tree, sep="\n")
    print("\nResult:", interpret(tree))
# Game bootstrap: read a dungeon spec file (argv[1]), validate it, generate
# the map/entities, and open the libtcod window before the generation loop.
from interpret import interpret
from generator import generate
from error_check import check_errors
import libtcodpy as libtcod
import engine
import sys

file = sys.argv[1]
with open(file, 'r') as f:
    # processing the file and generating dicts
    string = f.read()
    specs_dict, player_dict, monsters_dict, behav_dict, spell_dict = interpret(
        string)

check_errors(specs_dict, player_dict, monsters_dict, behav_dict, spell_dict)

# generating game components
map, entities, player = generate(specs_dict, player_dict, monsters_dict)
name = specs_dict.get('name', 'DunGen')

# setting up game window parameters and initialising
panel_height = 10
screen_width = max(map.width, 80)
screen_height = map.height + panel_height
libtcod.console_set_custom_font(
    'arial12x12.png', libtcod.FONT_TYPE_GREYSCALE | libtcod.FONT_LAYOUT_TCOD)
libtcod.console_init_root(screen_width, screen_height, name)

# the level generation loop
# NOTE(review): the loop body appears to continue beyond this excerpt.
while True:
    libtcod.console_clear(0)
def statement_loop(statement_text):
    """Parse and interpret *statement_text* against the enclosing scope's
    ``environment``, returning the values left on the expression stack."""
    parsed = parse(statement_text)
    stack = MachineExpressionStack([])
    interpret(parsed, stack, environment)
    return stack.values
import interpret

# Run a Brainfuck program (multiplies the first two tape cells, 5 * 4,
# then outputs the product) and print the interpreter's result.
program = list('[>[>+>+<<-]>>[-<<+>>]<<<-]>>.')
tape = [5, 4]
print(interpret.interpret(program, tape))
# Read the fixed equation image, classify which glyphs are exponents, build
# a Python expression string, and evaluate it.
# Fixes: no longer shadows the builtin `eval`; uses enumerate and a
# comprehension instead of index loops.
curr_lst, exp_lst = cv_imgs('IMG_6822.JPG')
#1 is exp, 0 != exp
is_exp = cluster(exp_lst)
final_array = [get_rep(item) for item in curr_lst]
final_str = ""
flag = False  # True while an opened "**(" exponent group is unclosed
for index, exp_flag in enumerate(is_exp):
    if exp_flag == 0:
        if flag:
            final_str += ")" + final_array[index]  # close exponent group
        else:
            final_str += final_array[index]
        flag = False
    else:
        if flag:
            final_str += final_array[index]
        else:
            final_str += "**(" + final_array[index]
            if index == len(is_exp) - 1:
                final_str += ")"  # exponent at the very end: close now
            else:
                flag = True
print('Your Equation is: ' + final_str)
result = interpret(final_str)  # renamed from `eval` (shadowed a builtin)
print(result)
import parse, interpret

# Parser smoke tests: a token list and a numeric literal.
print(parse.program(['if', 'true', '{', 'print', '1', ';', '}']))
print(parse.number('10', False))

# Interpreter smoke tests: a countdown loop, a not() guard, and an equality.
countdown = 'assign x := true; assign y := true; assign z:= 10; assign a := -1; while x==y{ if z<1{assign y := false;} print z; assign z:= z + a ;} print x; print y;'
print(interpret.interpret(countdown))
negation = 'assign x:= 0; while not(x < 1){print x; assign x:= x+0;} print false; '
print(interpret.interpret(negation))
print(interpret.interpret('print 3 == 3;'))
import interpret, machine,parse
import os

# Fresh-name generator smoke test: two consecutive fresh values.
print(machine.fresh())
print(machine.fresh())

# Interpreter smoke test: nested procedure definitions and calls.
print(interpret.interpret("procedure g {print 2;} procedure f {if true and true { call g; }} call g; call f;"))

# Raw machine simulation of a short instruction list.
print(machine.simulate([
    'set 10 4',
    'set 3 6',
    'set 4 2',
    'copy']))

# print(simulate(
#     [\
#     'label name',\
#     'set 20 35'\
#     ]\
#     +call('name')\
#     +call('name')))

# Round trip: tokenize/parse a program, compile it, and simulate the result.
body ="procedure h {print 3;} procedure g {print 2; call h; call h;} procedure f {call g; print 1; call g;} call f;"
x=parse.tokenizeAndParse(body)
print (x)
insts = machine.compile(body)
print(insts)
print(machine.simulate(insts))
import interpret
import frames
import os

# Main Calls: render one HTML frame per JSON data file.
for data_file in os.listdir('json_data'):
    page_name = data_file.split('.')[0] + '.html'
    frames.create_frame(page_name)
    interpret.interpret(page_name, 'json_data/' + data_file)
    frames.close_frame(page_name)
def eval(x):
    """Parse source *x* and run it through the interpreter.

    NOTE(review): the name shadows the builtin ``eval``; kept because
    callers use this name.
    """
    tree = parse(x)
    return i.interpret(tree)
def main(lon, lat):
    # Poll the weather.gov.sg 50km radar feed, download each newly available
    # image, and interpret rainfall at the supplied (lon, lat) position.
    # Returns (old_data, image, timestamp) after the first successful cycle.
    # Remove lon and lat for looping
    latest_obtained_url = urljoin('http://www.weather.gov.sg/files/rainarea/50km/v2/',
                                  'dpsri_70km_1970010100000000dBR.dpsri.png')
    old_time = datetime(1970, 1, 1)  # epoch sentinel: forces an immediate first poll
    base_x = []
    base_y = []
    old_data = None
    first_run = True
    url_list = []
    # print("UTOWN Starbucks is LON: 103.773066 LAT: 1.305627")
    # print("Siglap is LON: 103.930742 LAT: 1.311571")
    # print("CCK AVE 4 is LON: 103.741708 LAT: 1.382027")
    while True:
        num_coordinates = 1  # int(input('Enter number of positions to monitor (1-10): '))
        if num_coordinates < 1 or num_coordinates > 10:
            print("Number of positions must be from 1 to 10. Try again.")
        else:
            break
    for coord_number in range(0, num_coordinates):
        # print("Position #%s" % str(coord_number + 1))
        base_x.append(float(lon))  # input('Enter Longitude in Decimal Degrees: ')))
        base_y.append(float(lat))  # input('Enter Latitude in Decimal Degrees: ')))
    while True:
        current = datetime.now()
        if current - old_time >= timedelta(seconds=20):  # Check for updates every 20 seconds
            old_time = current
            timestamp = current.strftime("%d/%m/%Y at %H:%M:%S")
            url_list[:] = []  # Clears the list
            url_list = generate(url_list)
            latest_possible_url = identify_latest_possible(url_list)
            if latest_possible_url == latest_obtained_url:
                # print("No Updates on " + timestamp + " ; latest possible image already downloaded")
                continue
            latest_obtainable_url = identify_latest_obtainable(url_list)
            if latest_obtainable_url == "EOF":
                print("\nWARNING: Failed to access data for the past two hours; possible network failure")
                latest_obtainable_url = latest_obtained_url
            # The image timestamp sits at fixed character offsets in the URL
            # (yyyy[60:64] mm[64:66] dd[66:68] HH[68:70] MM[70:72]).
            latest_url_time = datetime(int(latest_obtainable_url[60:64]),
                                       int(latest_obtainable_url[64:66]),
                                       int(latest_obtainable_url[66:68]),
                                       int(latest_obtainable_url[68:70]),
                                       int(latest_obtainable_url[70:72]))
            old_url_time = datetime(int(latest_obtained_url[60:64]),
                                    int(latest_obtained_url[64:66]),
                                    int(latest_obtained_url[66:68]),
                                    int(latest_obtained_url[68:70]),
                                    int(latest_obtained_url[70:72]))
            if latest_url_time <= old_url_time:
                continue
                # print("No Updates on " + timestamp + " ; awaiting availability of latest possible image")
            else:
                # Human-readable dd/mm/yyyy HHMMhrs label for log messages.
                image = "%s/%s/%s %shrs" % (latest_obtainable_url[66:68],
                                            latest_obtainable_url[64:66],
                                            latest_obtainable_url[60:64],
                                            latest_obtainable_url[68:72])
                try:
                    temporary_image = io.BytesIO(request.urlopen(latest_obtainable_url).read())
                except:
                    # NOTE(review): bare except hides the real network error.
                    print("Radar Image for %s failed to download on %s" % (image, timestamp))
                else:
                    # print("\nRadar Image for %s downloaded on %s" % (image, timestamp))
                    latest_obtained_url = latest_obtainable_url
                    # Color quantize the radar image
                    # pixel_color, width, height = convert_colors(str(latest_obtainable_url.rsplit('/', 1)[1]))
                    try:
                        pixel_color, width, height = convert_colors(temporary_image)
                    except:
                        # NOTE(review): on failure pixel_color/width/height stay
                        # unbound, so the interpret call below will also fail.
                        print("Cannot convert colors")
                    try:
                        old_data = interpret(base_x, base_y, pixel_color, width, height,
                                             image, num_coordinates, old_data, first_run)
                    except:
                        print("Cannot interpret data")
                    first_run = False
                    return old_data, image, timestamp  # Remove return old_data for looping
parsed_list.append(parse(tokenized, parsed_list)) tokenized.pop(0) elif first_char == ')': raise Exception("Syntax Error, unexpected paranthesis") elif first_char != ',': return categorize_token(tokenized.pop(0)) return parsed_list def categorize_token(token): """ """ if token[0] == token[-1] == '"': token = token[1:-1] else: try: token = int(token) except ValueError: try: token = float(token) except ValueError: return {'type': 'identifier', 'value': token} return {'type': 'literal', 'value': token} #import doctest #doctest.testmod() x ='((lambda (x) (lambda (y) x) 1) 1)' tokenized = parse(tokenize(x)) print tokenized print interpret.interpret(tokenized)