def compile_by_domain_and_scope(domain, scope):
    """Compile the Java sources of one scope within a domain."""
    common.print_verbose("Compiling " + scope)
    source_dir = "domains/" + domain + "/src/" + scope + "/java"
    output_dir = "target/domains/" + scope + "/" + domain
    compile.compile(
        source_dir,
        output_dir,
        dependencies.classpath_for(domain, scope),
    )
def _project_compile(project):
    """Compile *project* for GEO submission inside the Flask app context,
    emailing the user at start, on success, and on failure."""
    objectId = project.objectId
    with app.app_context():
        try:
            # Tell the user compilation has begun.
            _project_email(
                objectId,
                'Compilation started for project {}'.format(objectId),
                'Alaska has started compiling project {} for GEO submission.'.
                format(objectId))
            # Tag the error-reporting scope so failures trace back to this project.
            with configure_scope() as scope:
                scope.set_tag('compile', objectId)
                compile(project)
                project.progress = 'compiled'
                project.save()
                _project_email(
                    objectId,
                    'Compilation finished for project {}'.format(objectId),
                    ('Alaska has finished compiling project {} for GEO submission. '
                     'Please visit the unique URL to submit.').format(objectId))
        except Exception as e:
            # NOTE(review): progress is set to 'success' in the *failure* path —
            # this looks like a bug (perhaps it should be an error state); confirm.
            project.progress = 'success'
            project.save()
            _project_email(
                objectId,
                'Compiliation failed for project {}'.format(objectId),
                ('Alaska encountered an error while compiling project {} for GEO submission.'
                 '<br>{}<br>'
                 'Please submit an issue on Github if '
                 'this keeps happening.').format(objectId, str(e)))
def main():
    """Entry point: compile and assemble the file named on the command line.

    No arguments  -> report an error.
    One argument  -> compile and assemble only.
    More than one -> compile, assemble, and save; an Option flag's following
                     argument overrides the default output name 'a.exe'.
    """
    option = sys.argv
    if len(option) == 1:
        # No input file supplied.
        error(option, 1, 1)
    elif len(option) > 2:
        data = openfile(option)
        data = compile(data)
        data = assembly(data)
        name = 'a.exe'
        option.pop(0)  # drop the interpreter/program name
        op = 0
        while len(option) > op:
            if option[op] in Option:
                # The argument after a recognized option names the output file.
                if op + 1 < len(option):
                    name = option[op + 1] + '.exe'
            op += 1
        save(data, name)
    else:
        # Exactly one argument: compile and assemble without saving to disk.
        data = openfile(option)
        data = compile(data)
        data = assembly(data)
def benchmark(git_repo, src_dir, branches, git_origin, images, qmake_file,
              binary, clone_url=None):
    """For each branch: check out, clean, build with qmake, and benchmark."""
    if not os.path.exists(git_repo):
        if clone_url is not None:
            git.clone_repo(clone_url, git_repo)
            git.fetch(git_repo)
            git.replace_remote(git_repo, clone_url)
        else:
            git.clone_repo(git_origin, git_repo)
    git.fetch_all(git_repo)
    results = {"computer_name": computer_name()}
    for branch in branches:
        git.checkout_branch(git_repo, branch)
        git.clean(src_dir)
        compile.compile(qmake_file, cwd=src_dir)
        entry = {"revision": git.current_revision(git_repo)}
        entry["results"] = run(src_dir + "/" + binary, images)
        results[branch] = entry
    return results
def benchmark_local(src_dir, images, qmake_file, binary):
    """Build the local checkout and run the benchmark over *images*."""
    results = {"computer_name": computer_name()}
    compile.compile(qmake_file, cwd=src_dir)
    results["local"] = {
        "revision": "local",
        "results": run(src_dir + "/" + binary, images),
    }
    return results
def benchmark_local(src_dir, images, qmake_file, binary):
    """Compile the working tree with qmake, then benchmark the built binary."""
    results = {"computer_name": computer_name()}
    compile.compile(qmake_file, cwd=src_dir)
    record = {"revision": "local"}
    record["results"] = run("/".join((src_dir, binary)), images)
    results["local"] = record
    return results
def test_duplicate_labels():
    """A program that defines the same label twice must raise
    DuplicateLabelException at compile time."""
    with pytest.raises(DuplicateLabelException):
        compile('''
        here : a
        another: b
        here : c
        ''')
def compile(self, interface, autocompile = False):
    """Launch 0compile for *interface*; on success reload the interface's
    feeds from the disk cache and re-run the solver so the fresh local
    build becomes visible."""
    import compile
    def on_success():
        # A new local feed may have been registered, so reload it from the disk cache
        info(_("0compile command completed successfully. Reloading interface details."))
        reader.update_from_cache(interface)
        for feed in interface.extra_feeds:
            # force=True bypasses the in-memory cache for each extra feed.
            self.policy.config.iface_cache.get_feed(feed.uri, force = True)
        self.policy.recalculate()
    compile.compile(on_success, interface.uri, autocompile = autocompile)
def compile(self, interface, autocompile = True):
    """Launch 0compile for *interface*; when it completes, refresh the
    cached feeds and trigger a global recalculation via the main module."""
    import compile
    def on_success():
        # A new local feed may have been registered, so reload it from the disk cache
        info(_("0compile command completed successfully. Reloading interface details."))
        reader.update_from_cache(interface)
        for feed in interface.extra_feeds:
            # force=True bypasses the in-memory cache for each extra feed.
            self.config.iface_cache.get_feed(feed.uri, force = True)
        # Imported lazily to avoid a circular import at module load time.
        import main
        main.recalculate()
    compile.compile(on_success, interface.uri, autocompile = autocompile)
def main():
    """One-click deploy pipeline: elevate privileges, confirm with the user,
    read config, download the source, compile it, and deploy."""
    elevate_to_admin()
    utils.log('Starting one_click_deploy')
    run_confirmation()
    options = configure.get_config_values()
    source.download_code(options)
    compile.compile(options)
    deploy.deploy(options)
    utils.log('Done!')
def NOtest_that_C_extension_compiles(self):
    """Try to import the C search-functions extension, compiling it from
    source when the import fails.

    Disabled (name does not start with 'test').  Raises Exception when the
    extension cannot be compiled.
    """
    FN = 'search_functions_ext.c'
    try:
        import search_functions_ext
    except ImportError:
        # Extension missing: build it, then import again to verify.
        from compile import compile
        try:
            compile(FN)
        except Exception:
            raise Exception('Could not compile %s' % FN)
        else:
            import search_functions_ext
def do_not_test_that_C_extension_compiles(self):
    """Build polygon_ext from its C source when importing it fails.

    Disabled. EQRM should not depend on numeric.  Raises Exception when
    the extension cannot be compiled.
    """
    FN = 'polygon_ext.c'
    try:
        import polygon_ext
    except ImportError:
        from compile import compile
        try:
            compile(FN)
        except Exception:
            # A string after `raise` is invalid Python 3; raise a real exception.
            raise Exception('Could not compile %s' % FN)
        else:
            import polygon_ext
def do_not_test_that_C_extension_compiles(self):
    """Compile polygon_ext.c if the polygon_ext module is not importable.

    Disabled. EQRM should not depend on numeric.  Raises Exception on a
    failed build.
    """
    FN = 'polygon_ext.c'
    try:
        import polygon_ext
    except ImportError:
        from compile import compile
        try:
            compile(FN)
        except Exception:
            # String exceptions were removed from Python; raise an Exception object.
            raise Exception('Could not compile %s' % FN)
        else:
            import polygon_ext
def load_map(map, src_file, output_dir, cache_dir=None, datasources_cfg=None, verbose=False):
    """Apply a stylesheet source file to a given mapnik Map instance, like mapnik.load_map().

    Parameters:

      map:
        Instance of mapnik.Map.

      src_file:
        Location of stylesheet .mml file. Can be relative path, absolute path,
        or fully-qualified URL of a remote stylesheet.

      output_dir:
        ...

    Keyword Parameters:

      cache_dir:
        ...

      datasources_cfg:
        ...

      verbose:
        ...
    """
    scheme, n, path, p, q, f = urlparse(src_file)

    if scheme in ('file', ''):
        assert exists(src_file), "We'd prefer an input file that exists to one that doesn't"

    if cache_dir is None:
        cache_dir = expanduser(CACHE_DIR)

        # only make the cache dir if it wasn't user-provided
        if not isdir(cache_dir):
            mkdir(cache_dir)
            # 0o755 instead of the Python-2-only literal 0755, which is a
            # syntax error on Python 3; 0o755 parses identically on 2.6+ and 3.
            chmod(cache_dir, 0o755)

    dirs = Directories(output_dir, realpath(cache_dir), dirname(src_file))
    compile(src_file, dirs, verbose, datasources_cfg=datasources_cfg).to_mapnik(map, dirs)
def POST_compile_svg():
    """Bottle handler, called by the SVG editor when asked to compile an
    SVG plus a script.

    Reads the SVG text, instruction script, viewport geometry, and rendering
    flags from the POSTed form and delegates to compile.compile().
    """
    svg = request.forms.get("svg")
    instr = request.forms.get("instr")
    ox = request.forms.get("ox")
    oy = request.forms.get("oy")
    ow = request.forms.get("ow")
    oh = request.forms.get("oh")
    frame = request.forms.get("frame")
    minimize = request.forms.get("minimize")
    widthPerc = request.forms.get("widthPerc")
    try:
        widthPerc = int(widthPerc)
    except (TypeError, ValueError):
        # Missing or non-numeric form value: fall back to full width.
        widthPerc = 100
    svg_tree = ET.fromstring(svg)
    instructions = compile.parse_instructions(instr)
    # Form flags arrive as the strings "true"/"false".
    frame = frame == "true"
    minimize = minimize == "true"
    size = {"x": ox, "y": oy, "width": ow, "height": oh}
    result = compile.compile(svg_tree, instructions, size=size, frame=frame,
                             noload=True, minimizeScript=minimize,
                             widthPerc=widthPerc)
    return result
def lambda_handler(event, context):
    """AWS Lambda entry point: preprocess the posted C source, compile it,
    and return the serialized bytecode (HTTP 200) or an error report (400)."""
    cpp = Preprocessor()
    cpp.add_path(os.getcwd() + "/lvmxlib")
    tmpf = io.StringIO("")
    # Round-trip the request body through /tmp so the preprocessor reads a file.
    with open("/tmp/main.c", mode='w') as f:
        f.write(event['body'])
    with open("/tmp/main.c", mode="r") as f:
        cpp.parse(f)
    cpp.write(tmpf)
    g.init("/tmp/main.c", tmpf.getvalue())
    try:
        dumps = compile(g.source)
    except Exception as e:
        return {'statusCode': 400, 'body': g.r.report()}
    if dumps is None:
        return {'statusCode': 400, 'body': g.r.report()}
    # Serialize: a .data section of hex values, then a .code section of
    # instruction objects, each prefixed with its element count.
    bytecode = f".data {len(dumps['data'])}" + '\n'
    for elem in dumps['data']:
        bytecode += value2hex(elem) + '\n'
    bytecode += f".code {len(dumps['code'])}" + '\n'
    for elem in dumps['code']:
        bytecode += elem.serialize() + '\n'
    if g.r.hasError():
        return {'statusCode': 400, 'body': g.r.report()}
    else:
        return {'statusCode': 200, 'body': bytecode}
def run_tests(tests):
    """Run (name, expect_success, source) triples; exit(1) on the first
    mismatch between expectation and compile outcome."""
    for name, expected, source in tests:
        if name[0] == '_':
            print('\x1b[33mSKIPPED\x1b[0m ' + name[1:])
            continue
        try:
            compile(source)
        except CompileError as e:
            if expected:
                print('\x1b[31mFAILED\x1b[0m ' + name + ':')
                print(e.msg[:-1])
                sys.exit(1)
        else:
            if not expected:
                print('\x1b[31mFAILED\x1b[0m ' + name + ' (expected error but succeeded)')
                sys.exit(1)
        print('\x1b[32mPASSED\x1b[0m ' + name)
def show_popup_menu(self, iface, bev):
    """Build and pop up the per-interface context menu: feeds, versions,
    bug reporting, and — when source is available — compile actions."""
    import bugs
    import compile
    have_source = properties.have_source_for(self.policy, iface)
    menu = gtk.Menu()
    for label, cb in [
        (_('Show Feeds'), lambda: properties.edit(self.policy, iface)),
        (_('Show Versions'), lambda: properties.edit(self.policy, iface, show_versions=True)),
        (_('Report a Bug...'), lambda: bugs.report_bug(self.policy, iface))
    ]:
        item = gtk.MenuItem(label)
        if cb:
            # Bind cb as a default argument so each item keeps its own callback.
            item.connect('activate', lambda item, cb=cb: cb())
        else:
            item.set_sensitive(False)
        item.show()
        menu.append(item)
    item = gtk.MenuItem(_('Compile'))
    item.show()
    menu.append(item)
    if have_source:
        # Source available: offer automatic and manual builds in a submenu.
        compile_menu = gtk.Menu()
        item.set_submenu(compile_menu)
        item = gtk.MenuItem(_('Automatic'))
        item.connect(
            'activate', lambda item: compile.compile(
                self.policy, iface, autocompile=True))
        item.show()
        compile_menu.append(item)
        item = gtk.MenuItem(_('Manual...'))
        item.connect(
            'activate', lambda item: compile.compile(
                self.policy, iface, autocompile=False))
        item.show()
        compile_menu.append(item)
    else:
        # No source: leave the Compile item greyed out.
        item.set_sensitive(False)
    menu.popup(None, None, None, bev.button, bev.time)
def compile_array(array, max_length):
    """Compile every source in *array* in place, padding or truncating each
    result to exactly *max_length* entries."""
    for index, source in enumerate(array):
        code = asm.asm(compile.compile(source).split("\n"))
        if len(code) < max_length:
            # pad to max_length
            code = pad_array(code, max_length - len(code), 0)
        elif len(code) > max_length:
            print("Warning: data sample size exceeds max length!")
            code = code[:max_length]
        array[index] = code
def main(args):
    """Parse the protocol XML, validate it, then compile and/or pretty-print
    according to the command-line arguments.  Returns 1 on failure."""
    try:
        parser = etree.XMLParser(dtd_validation=True)
        root = etree.parse(args.xml, parser).getroot()
    except etree.XMLSyntaxError as e:
        error('syntax', repr(e))
        return 1
    protocol = Protocol.parse(root)
    if not validate(protocol):
        return 1
    if args.out:
        compile(protocol, args)
    if args.print:
        pretty(protocol)
def upload():
    """Compile the two MIDI files into one, upload the result to the storage
    bucket under a fresh object name, and return a signed link as JSON."""
    args = compile.compile('Unravel.mid','BB.mid')
    global number
    url = "song" + str(number)
    print(url)
    blob = bucket.blob(url)
    # Bump the module-level counter so the next upload gets a unique name.
    number += 1
    blob.upload_from_filename(args)
    output = json.dumps({"link": generate_signed_url("hackthenorth-lads", url, 600000)})
    return output
def main():
    """dcc driver entry point: dispatch on argv/environment to the gdb
    driver, the post-mortem gdb starter, the valgrind watcher, or the
    compiler itself."""
    signal.signal(signal.SIGINT, handler)
    debug = int(os.environ.get('DCC_DEBUG', '0'))
    colorize_output = sys.stderr.isatty() or os.environ.get(
        'DCC_COLORIZE_OUTPUT', False)
    if debug:
        print(sys.argv,
              'DCC_RUN_INSIDE_GDB="%s" DCC_PID="%s"' % (os.environ.get(
                  'DCC_RUN_INSIDE_GDB', ''), os.environ.get('DCC_PID', '')),
              file=sys.stderr)
    if not sys.argv[1:] and 'DCC_RUN_INSIDE_GDB' in os.environ:
        drive_gdb()
    elif not sys.argv[1:] and 'DCC_PID' in os.environ:
        # we are invoked by the binary because an error has occurred
        start_gdb()
    elif sys.argv[1:] == ['--watch-stdin-for-valgrind-errors']:
        watch_valgrind()
    else:
        compile()
def process(fname, step=False, debug_compiler=False):
    """Compile `<fname>.txt` and simulate it instruction by instruction.

    step: pause before each instruction, dump registers, and accept
          interactive commands ('s' stop, 'i' show image, Enter continue).
    debug_compiler: execute without the try/except wrapper so errors
          surface with full tracebacks.
    """
    global reg, i_mem, param, d_mem, total_ins, total_clocks
    i_mem = compile(fname + '.txt')
    # Slot 0 of the compiled image seeds the MIDR register.
    reg['MIDR'] = i_mem[0]
    isError = False
    while reg['PC'] < len(i_mem):
        if (step):
            # Interactive single-step mode: dump registers, prompt, peek memory.
            for attribute, value in reg.items():
                print('{} : {}'.format(attribute, value))
            x = input('Step at %i. Enter: CONTINUE, "s": STOP: IMSHOW --' % (reg['PC']))
            print(d_mem[0:10])
            if (x == 's'):
                break
            elif (x == 'i'):
                imshow('both')
        if (i_mem[reg['PC']] == 'END'):
            break
        if (debug_compiler):
            # Unprotected execution: let exceptions propagate for debugging.
            opcode_str, param = i_mem[reg['PC']]
            opcode = instructions[opcode_str]
            opcode()
        else:
            try:
                opcode_str, param = i_mem[reg['PC']]
                opcode = instructions[opcode_str]
                opcode()
                total_ins += 1
                total_clocks += clocks[opcode_str]
            except:
                # Any failure aborts the run and reports the offending line.
                isError = True
                print('Error at line: ', reg['PC'] + 1, i_mem[reg['PC']], i_mem[reg['PC'] - 1])
                break
    if (not isError):
        print('Simulation completed\n')
        try:
            print('Total Instructions: ', total_ins)
            print('Total Clocks: ', total_clocks)
            #imshow('both')
            #print(reg['AWG'], reg['AWT'])
        except:
            print('Error displaying image')
    print(d_mem[0:256])
def codeModified(self, event):
    """Tkinter callback fired when the assembly text changes: recompile,
    show the round-tripped disassembly, and regenerate the YARA rule."""
    try:
        binvalue = compile.compile(self.TEXT.get(1.0, tkinter.END), self.arch)
        self.RESULT.delete(1.0, tkinter.END)
        decompiled = compile.decompile(binvalue, self.arch)
        self.RESULT.insert(tkinter.END, decompiled)
        self.YARABOX.delete(1.0, tkinter.END)
        self.YARABOX.insert(tkinter.END, self.yaragen.update(decompiled))
    except compile.Yarasm:
        # Transient parse errors while the user is typing: keep old output.
        pass
def benchmark(git_repo, src_dir, branches, git_origin, images, qmake_file, binary, clone_url=None):
    """Clone/fetch the repo as needed, then for each branch: check out,
    clean, build via qmake, run the benchmark, and record results keyed by
    branch name (plus the machine's name)."""
    if (not os.path.exists(git_repo)):
        if (clone_url != None):
            # Fresh clone from the explicit URL, then repoint the remote.
            git.clone_repo(clone_url, git_repo)
            git.fetch(git_repo)
            git.replace_remote(git_repo, clone_url)
        else:
            git.clone_repo(git_origin, git_repo)
    git.fetch_all(git_repo)
    results = {"computer_name": computer_name()}
    for branch in branches:
        git.checkout_branch(git_repo, branch)
        git.clean(src_dir)
        compile.compile(qmake_file,cwd=src_dir)
        r = {"revision": git.current_revision(git_repo)}
        r["results"] = run(src_dir+"/"+binary, images)
        results[branch] = r
    return results
def load_map(map, src_file, output_dir, cache_dir=None, datasources_cfg=None, verbose=False):
    """Apply a stylesheet source file to a given mapnik Map instance, like mapnik.load_map().

    Parameters:

      map:
        Instance of mapnik.Map.

      src_file:
        Location of stylesheet .mml file. Can be relative path, absolute path,
        or fully-qualified URL of a remote stylesheet.

      output_dir:
        ...

    Keyword Parameters:

      cache_dir:
        ...

      datasources_cfg:
        ...

      verbose:
        ...
    """
    scheme, n, path, p, q, f = urlparse(src_file)

    if scheme in ('file', ''):
        assert exists(src_file), "We'd prefer an input file that exists to one that doesn't"

    if cache_dir is None:
        cache_dir = expanduser(CACHE_DIR)

        # only make the cache dir if it wasn't user-provided
        if not isdir(cache_dir):
            mkdir(cache_dir)
            # 0o755: the bare-0 octal form (0755) is Python-2-only syntax and a
            # hard error on Python 3; 0o755 is accepted by both.
            chmod(cache_dir, 0o755)

    dirs = Directories(output_dir, realpath(cache_dir), dirname(src_file))
    compile(src_file, dirs, verbose, datasources_cfg=datasources_cfg).to_mapnik(map, dirs)
def main_watcher():
    """Two-stage build: transform main.py in Python, then hand the result
    to Transcrypt for JavaScript compilation."""
    currdir = Path(getfile(compile)).parent
    transcrypt_command = currdir / '.venv' / 'bin' / 'transcrypt'
    command = f'{transcrypt_command} -b -m -n {currdir}/main_compiled.py'
    with open('main.py') as main_py, open(f'{currdir}/main_compiled.py', 'w') as out:
        # Stage 1 from Joachim
        out.write(compile(main_py.read()))
        out.close()
        # Stage 2: Transcrypt
        subprocess.call(command, shell=True, cwd=currdir)
def check_C_extension(sequential_allowed):
    """Verify existence of mpi.so.

    When sequential_allowed, import mpi directly; otherwise just check the
    shared object file is present.  If neither works, fall back to building
    mpi.c, and raise Exception with build instructions when that fails too.
    """
    try:
        if sequential_allowed:
            import mpi
            # This will crash on systems like the Alpha Server or the Sun
            # if program is run sequentially
        else:
            # A more general test suitable for the Alpha Server is
            fid = open('mpi.so', 'r')
            fid.close()
        # On the other hand, when using pypar as a package, we do
        # not have access to mpi.so so we need link from parent directory to
        # pypar/mpi.so or pypar/mpi.c
    except Exception:
        try:
            import compile
            compile.compile('mpi.c', 'mpicc', verbose=0)
        except Exception:
            # `raise "string"` was removed from Python; raise a real exception.
            raise Exception("ERROR: Please compile C extension mpi.c - python install.py")
def compile_and_compare(assembly, expected_machine_code, **compiler_args):
    """Assert that compiling *assembly* yields *expected_machine_code*,
    comparing line by line after whitespace normalization."""
    __tracebackhide__ = True
    received = compile(assembly, **compiler_args).strip('\n').replace('\t', ' ').rstrip()
    expected = expected_machine_code.strip('\n').replace('\t', ' ').rstrip()
    print_as_columns('RECEIVED\n' + received, 'EXPECTED\n' + expected)
    received_lines = received.strip().split('\n')
    expected_lines = expected.strip().split('\n')
    for got, want in zip_longest(received_lines, expected_lines, fillvalue=''):
        assert(got.strip() == want.strip())
def run(problem_id, submit_id, language, data_count, user_id):  # judging entry point
    """Judge a submission: pre-check for dangerous code, compile it, then run
    it against *data_count* test cases within the problem's limits.  Returns
    a result dict keyed like program_info below."""
    protect.low_level()
    # Fetch the problem's execution time and memory limits.
    time_limit, mem_limit = get_problem_limit(problem_id)
    program_info = {
        "submit_id": submit_id,
        "problem_id": problem_id,
        "take_time": 0,
        "take_memory": 0,
        "user_id": user_id,
        "result": 0,
    }
    # result_code = {
    #     "Waiting": 0,
    #     "Accepted": 1,
    #     "Time Limit Exceeded": 2,
    #     "Memory Limit Exceeded": 3,
    #     "Wrong Answer": 4,
    #     "Runtime Error": 5,
    #     "Output limit": 6,
    #     "Compile Error": 7,
    #     "Presentation Error": 8,
    #     "System Error": 11,
    #     "Judging": 12,
    # }
    result_code = DBData.result_code
    if check_dangerout_code.check_dangerous_code(submit_id, language) == False:
        # Pre-check failed: forbidden constructs count as a runtime error.
        program_info['result'] = result_code["Runtime Error"]
        return program_info
    compile_result = compile.compile(submit_id, language)
    if compile_result is False:
        # Compilation failed.
        program_info['result'] = result_code["Compile Error"]
        return program_info
    if data_count == 0:
        # No test data available for this problem.
        logging.error("没有测试数据")
        program_info['result'] = result_code["System Error"]
        return program_info
    result = judge_main.judge(data_count, time_limit, mem_limit, program_info, language)
    logging.debug(result)
    return result
def send_message(self, data):
    """Dispatch an incoming JSON message on its 'language' field: compile
    .net/java requests and echo the result, acknowledge testok/run jobs
    with 'ok', or report an unsupported language.  (Python 2 code.)"""
    logger=log.getlogger()
    logger.info(data)
    print data
    if jsondecode.jsondecode(data,'language')==".net":
        #self._stream.write_to_fd(".net")
        self._stream.write_to_fd(compile.compile('.net',jsondecode.jsondecode(data,'tagname'),data))
        self.read_message()
    elif jsondecode.jsondecode(data,'language')=="java":
        self._stream.write_to_fd(compile.compile('java', jsondecode.jsondecode(data,'tagname'),data))
        self.read_message()
    elif jsondecode.jsondecode(data,'language')=="testok":
        compile.compile('testok',jsondecode.jsondecode(data,'tagname'),data)
        self._stream.write_to_fd('ok')
        self.read_message()
    elif jsondecode.jsondecode(data,'language')=="run":
        compile.compile('run',jsondecode.jsondecode(data,'tagname'),data)
        self._stream.write_to_fd('ok')
        self.read_message()
    else:
        self._stream.write_to_fd("no support language")
        self.read_message()
def work(i):
    """Judge one submission row *i* from problem_status: write the source to
    disk, compile it, run it against the stored answer files, and update the
    row with result/memory/runtime."""
    db = pymysql.connect(host="127.0.0.1", user="******", password="******", db="oj", port=3306)
    try:
        problem_infor = {}
        cursor = db.cursor()
        #i = cursor.fetchone()
        # print(i)  #debug
        problem_infor["run_id"] = i[0]    # run id of this judgement
        problem_infor["code"] = i[9]      # submitted source code
        problem_infor["id"] = i[1]        # problem number
        problem_infor["language"] = i[8]  # language id of the submission
        cursor.execute("select * from problem where id = %d " % int(problem_infor["id"]))
        one = cursor.fetchone()
        # fin = open(MyConfig.ans_in_file, "w+")
        # fout = open(MyConfig.ans_out_file, "w+")
        # fin = one[11]
        # fout = one[12]
        # fin.close()
        # fout.close()
        language_code = problem_infor["language"]
        # Source file name is "<run_id><language extension>".
        codepath = "./" + str(
            problem_infor["run_id"]) + MyConfig.code_file[language_code]
        # codepath = MyConfig.code_file[language_code]
        fcode = open(codepath, "w+")
        fcode.write(problem_infor["code"])
        fcode.close()
        if compile(MyConfig.language_code[language_code], codepath[2:], 'main' + str(problem_infor['run_id'])):
            # Compiled OK: materialize the expected input/output, then run.
            ansinpath = MyConfig.ans_in_file + str(problem_infor["run_id"])
            ansoutpath = MyConfig.ans_out_file + str(problem_infor["run_id"])
            fin = open(ansinpath, "w+")
            fout = open(ansoutpath, "w+")
            fin.write(one[11])
            fout.write(one[12])
            fin.close()
            fout.close()
            judge_code = time_mem(MyConfig.language_code[language_code], ansinpath, ansoutpath, str(problem_infor["run_id"]))
            # print(judge_code)
        else:
            judge_code = {"result": 0, "time": -1, "memory": -1, "is_ac": "0"}
            judge_code["result"] = 8  # compile error
            # print("compile failed")
        # cursor.execute()
        # print(judge_code)  # debug
        try:
            # Best-effort cleanup of the source and the built binary.
            os.remove(codepath)
            os.remove('./main' + str(problem_infor['run_id']))
        except:
            pass
        cursor.execute(
            "update problem_status set result = %d,memory = %d,runtime = %d,code_len = %d where run_id = %d"
            % (int(judge_code["result"]), int(judge_code["memory"]), int(judge_code["time"]), len(
                problem_infor["code"]), int(problem_infor["run_id"])))
        print(judge_code)
    except:
        # NOTE(review): this bare except silently discards any judging failure.
        pass
    db.commit()
    cursor.close()
    db.close()
from compile import compile

# Script entry point: run the compiler when executed directly.
if __name__ == '__main__':
    compile()
def test_duplicate_defines():
    """Defining the same name twice must raise DuplicateDefineException
    at compile time."""
    with pytest.raises(DuplicateDefineException):
        compile('''
        a = 123
        a = 123
        ''')
return pic #Otherwise, download the image. return get_from_gdrive(id_from_gdrive_url(img_src), new_img_src) if __name__ == '__main__': from sys import argv # by default, run the server. if len(argv) <= 1 or argv[1] == 'test-server': import http.server as srv server = srv.HTTPServer(('', 8080), srv.SimpleHTTPRequestHandler) server.serve_forever() elif argv[1] == 'mentor-pics': set_image_sources('assets/mentors.json', image_processor) elif argv[1] == '-h': print("Run {} test-server to run a quick testing server.".format( argv[0])) print("Run {} mentor-pics to load the mentor images.".format(argv[0])) elif argv[1] == "build": import compile dirName = "views" globalData = {} layoutName = "layouts/layout.mustache" outputDir = "." compile.compile(dirName, layoutName, outputDir, globalData) else: print("Dude, wut?") print("Run '-h' for help.")
def load_map(map, input, target_dir=None, move_local_files=False):
    """Compile *input* and apply the resulting style to the mapnik Map."""
    compiled = compile(input, target_dir=target_dir,
                       move_local_files=move_local_files)
    compiled.to_mapnik(map)
def eval(self, ast, w_context):
    """Evaluate *ast* in *w_context* — via the bytecode interpreter when
    enabled, otherwise by dispatching to a per-node eval_* method."""
    if not self.use_bytecode:
        handler = getattr(self, "eval_" + ast.__class__.__name__)
        return handler(ast, w_context)
    return BytecodeInterpreter(compile.compile(ast), w_context, self).run()
def __init__(self, policy, interface, show_versions=False):
    """Build the interface-properties dialog: title, compile button,
    feeds/versions tabs, and the stability-policy selector."""
    self.policy = policy
    widgets = Template('interface_properties')
    self.interface = interface
    window = widgets.get_widget('interface_properties')
    self.window = window
    window.set_title(_('Properties for %s') % interface.get_name())
    window.set_default_size(-1, gtk.gdk.screen_height() / 3)
    self.compile_button = widgets.get_widget('compile')
    self.compile_button.connect(
        'clicked', lambda b: compile.compile(policy, interface))
    window.set_default_response(gtk.RESPONSE_CANCEL)
    def response(dialog, resp):
        if resp == gtk.RESPONSE_CANCEL:
            window.destroy()
        elif resp == gtk.RESPONSE_HELP:
            properties_help.display()
    window.connect('response', response)
    notebook = widgets.get_widget('interface_notebook')
    assert notebook
    feeds = Feeds(policy, interface, widgets)
    stability = widgets.get_widget('preferred_stability')
    stability.set_active(0)
    if interface.stability_policy:
        # Combo index 0 is "no policy"; the known levels start at index 1.
        i = [stable, testing, developer].index(interface.stability_policy)
        i += 1
        # NOTE(review): i == 0 is unreachable here — index() never returns -1
        # (it raises ValueError), so after i += 1 the value is always >= 1.
        if i == 0:
            warn(_("Unknown stability policy %s"), interface.stability_policy)
    else:
        i = 0
    stability.set_active(i)
    def set_stability_policy(combo):
        # Persist the combo selection as the interface's stability policy.
        i = stability.get_active()
        if i == 0:
            new_stability = None
        else:
            name = stability.get_model()[i][0].lower()
            new_stability = stability_levels[name]
        interface.set_stability_policy(new_stability)
        writer.save_interface(interface)
        policy.recalculate()
    stability.connect('changed', set_stability_policy)
    self.use_list = ImplementationList(policy, interface, widgets)
    self.update_list()
    feeds.tv.grab_focus()
    def updated():
        # Refresh the dialog whenever the policy changes.
        self.update_list()
        feeds.updated()
        self.shade_compile()
    # Stop watching when the window goes away.
    window.connect('destroy', lambda s: policy.watchers.remove(updated))
    policy.watchers.append(updated)
    self.shade_compile()
    if show_versions:
        notebook.next_page()
% ######### ######## ### ######## % % % % Block Oriented Programming Compiler % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ''')) print comments print "[*] Starting BOPC %s at %s" % ( VERSION, bolds(now.strftime("%d/%m/%Y %H:%M"))) # ------------------------------------------------------------------------- # BOPC operation: Emit SPL IR # ------------------------------------------------------------------------- if args.emit_IR and args.source: IR = C.compile(args.source) IR.compile() # compile the SPL payload IR = O.optimize(IR.get_ir()) IR.optimize(mode=args.optimizer) # optimize IR (if needed) IR.emit(args.source) # ------------------------------------------------------------------------- # BOPC operation: Trace Search # ------------------------------------------------------------------------- elif args.source and args.entry: IR = C.compile(args.source) IR.compile() # compile the SPL payload IR = O.optimize(IR.get_ir())
def NOOP():
    """No operation: just advance the program counter."""
    INPC()

def END():
    """Terminal marker instruction."""
    print('END reached')

# Dispatch table mapping mnemonic strings to their handler functions.
instructions = {
    'TOGL': TOGL, 'LOAD': LOAD, 'LADD': LADD, 'LODK': LODK, 'STAC': STAC,
    'JUMP': JUMP, 'ADD': ADD, 'INCR': INCR, 'DECR': DECR, 'DIV': DIV,
    'SUBT': SUBT, 'RSET': RSET, 'COPY': COPY, 'NOOP': NOOP, 'END': END
}

# Simulator control flags.
stop_breaks = False
step = False

# Assemble the demo program; the first entry seeds the MIDR register.
i_mem = compile('DIVdownsample.txt')
reg['MIDR'] = i_mem[0]
param = ''
# Contact address: [email protected] # # Version 1.0 October 2001 # ============================================================================= """Module install.py - Compile C extension for pypar """ import os, sys # Attempt to compile mpi.so # try: import compile compile.compile('mpi.c', 'mpicc', verbose = 1) except: raise "Could compile C extension mpi.c - please try manually" if sys.platform in ['osf1V5', 'sunos5']: #Compaq AlphaServer or Sun sys.exit() # Alpha Server or Sun cannot import MPI on sequential processes # OK with LAM #Check if MPI module can be initialised # # Attempt to import and initialise mpi.so error = os.system('python -c "import mpi" > /dev/null') if error:
def run(self):
    """setuptools command hook: run compile() over the pokemongoproto
    directory, then delegate to the standard install step."""
    compile('python', os.path.join(here, 'pokemongoproto'))
    install.run(self)
# options # optimize is set to 1 of py2app to avoid errors with pymysql # bundle_files = 1 or 2 was causing failed builds so we moved # to bundle_files = 3 and Inno Setup options = {'py2exe': {'bundle_files': 3, 'compressed': 2, 'optimize': 1, 'packages': packages, 'includes': includes, 'excludes': excludes, }, 'py2app': {'packages': ['retriever'], 'includes': includes, 'site_packages': True, 'resources': [], 'optimize': 1, 'argv_emulation': True, 'no_chdir': True, 'iconfile': 'osx_icon.icns', }, }, ) try: from compile import compile compile() except: pass
def update(PAK, MIRROR, WORKDIR):
    """
    Update package 'PAK' in packages.rules.
    MIRROR is either a http or a ftp URL
    WORKDIR is the root directory to install everything to.

    Returns None on success/up-to-date/declined, 1 on compile failure;
    exits the process on download or dependency failure.
    """
    # Normalize MIRROR so it ends in "PAK/".
    # NOTE(review): the elif can never fire — if the first test is False the
    # if-branch runs; if it is True, endswith("PAK/") is also True.
    if( MIRROR.endswith("/PAK/") == False ):
        MIRROR = MIRROR+"/PAK/"
    elif( MIRROR.endswith("PAK/") == False):
        MIRROR = MIRROR+"PAK/"
    # --- Below this line: SETUP VARIABLES ---
    if(PAK.endswith(".json")==False):
        if(os.path.isdir("packages.rules"+os.sep+PAK)):
            return
        PAK=PAK+".json"
        # Easier JSON reading in the future.
        # Works better than constantly appending '.json' to PAK.
    TARFILE=packages.getTarfile(PAK)
    if (TARFILE == None):
        print( PAK+" doesn't point to a file. What do?")
        sys.exit(1)
    # A fully-qualified tarball URL overrides MIRROR and leaves just the name.
    if (TARFILE.startswith("http://") == True or TARFILE.startswith("ftp://") == True or TARFILE.startswith("https://") == True):
        FILE = TARFILE.split("/")[-1]
        MIRROR=TARFILE[0:-len(FILE)]
        TARFILE=TARFILE[-len(FILE):]
    if( TARFILE.endswith("bz2") == True):
        TARTYPE = "bz2"
    elif( TARFILE.endswith( "gz") == True):
        TARTYPE = "gz"
    else:
        print( bcolors.RED+"* WARNING"+bcolors.ENDC+": Tar file isn't bz2 or gzip")
    NAME = json.loads( open( "packages.rules"+os.sep+PAK).read() ).get("name")
    if (NAME == None):
        NAME=TARFILE[:-8]
    INSTDIR=packages.getInstdir(PAK)
    if( os.path.exists(WORKDIR) == False):
        os.mkdir(WORKDIR)
    DEPENDS = []
    DEPENDS=packages.getDepend(PAK)
    bcolors.YES = bcolors.GREEN+"Y"+bcolors.ENDC
    bcolors.NO = bcolors.RED+"n"+bcolors.ENDC
    if( DEPENDS != None):
        # Drop dependencies that are already installed (checksum file present).
        for DEPEND in DEPENDS:
            DEPENDTAR = packages.getTarfile(DEPEND+".json")
            if( os.path.exists( "packages.checksum"+os.sep+DEPENDTAR+".checksum" ) == True):
                DEPENDS.remove(DEPEND)
                continue
        if( DEPENDS != [] ):
            # Ask the user before recursively installing the remaining deps.
            print( PAK+" depends on" )
            print( DEPENDS )
            print( "Install above dependencies?"+" ["+bcolors.YES+"/"+bcolors.NO+"]")
            IN = input()
            if( IN.endswith("\r")==True ):
                IN = IN[:-1]
            while( IN.lower() != "y" or IN.lower() != "yes"):
                if( IN.lower() == "n" or IN.lower() == "no"):
                    return
                if( IN.lower() == "y" or IN.lower() == "yes"):
                    break
                if( IN.lower() == "" ):
                    break
                print( "Sorry, answer not recognized '"+IN+"'" )
                IN = input()
                if( IN.endswith("\r")==True ):
                    IN = IN[:-1]
            for DEPEND in DEPENDS:
                RETCODE = update(DEPEND, MIRROR, WORKDIR)
                if( RETCODE == 1):
                    sys.exit(1)
                DEPENDS.remove(DEPEND)
    INSTALLSCRIPT = packages.getInstallScript(PAK)
    # If this isn't needed, we won't use it.
    # --- Below this line: PRINT PACKAGE CRITICAL INFO ---
    print( bcolors.GREEN+"*"+bcolors.ENDC+" "+"Name"+": "+NAME)
    print( bcolors.GREEN+"*"+bcolors.ENDC+" "+"Package JSON"+": "+PAK)
    print( bcolors.GREEN+"*"+bcolors.ENDC+" "+"Package file"+": "+TARFILE)
    # --- Below this line: ACTUALLY UPDATE PACKAGE 'PAK' ---
    print(">>> Fetching checksum"+" '"+TARFILE+".checksum'")
    get.download(args.VERBOSE,MIRROR+TARFILE+".checksum", WORKDIR+TARFILE+".checksum")
    m = open(WORKDIR+TARFILE+".checksum", 'r').read()
    print( bcolors.GREEN+"*"+bcolors.ENDC+" SHA-1: "+m.split("\n")[0] )
    if ( os.path.exists("packages.checksum"+os.sep+TARFILE+".checksum") == False ):
        # Not installed yet: download and hash the tarball.
        if ( os.access("packages.checksum"+os.sep, os.F_OK) == False):
            os.mkdir("packages.checksum")
        print( ">>> Downloading"+" '"+TARFILE+"' -> '"+WORKDIR+TARFILE+"'")
        get.download( True, MIRROR+TARFILE, WORKDIR+TARFILE )
        m = hashlib.sha1(open(WORKDIR+TARFILE, "rb").read()).hexdigest()
    else:
        # Installed before: compare the stored checksum against the mirror's.
        mm = open("packages.checksum"+os.sep+TARFILE+".checksum", "r").read()
        if ( m == mm ):
            print( "Project is up-to-date!")
            clean.rm(WORKDIR+TARFILE+".checksum")
            return
        else:
            print( ">>> Downloading"+" '"+TARFILE+"' -> '"+WORKDIR+TARFILE+"'")
            get.download( True, MIRROR+TARFILE, WORKDIR+TARFILE )
            m = hashlib.sha1(open(WORKDIR+TARFILE, "rb").read()).hexdigest()
    # --- Below this line: EXTRACT UPDATE PACKAGE 'PAK' ---
    FILE = tarfile.open(WORKDIR+TARFILE, 'r')
    if( args.VERBOSE == False ):
        print( ">>> Extracting '"+TARFILE+"' -> '"+WORKDIR+INSTDIR+"'")
        FILE.extractall(WORKDIR+INSTDIR)
    else:
        # Verbose mode extracts member by member, printing each path.
        print( ">>> Extracting '"+FILE.name.split(os.sep)[-1]+"'" )
        MEMBERS=FILE.getmembers()
        for MEMBS in MEMBERS:
            print( ">>> '"+WORKDIR+INSTDIR+MEMBS.name+"'")
            FILE.extract(MEMBS.name, WORKDIR+INSTDIR)
    FILE.close()
    del FILE
    clean.rm(WORKDIR+TARFILE)
    # --- Below this line: INSTALLSCRIPT ---
    CXX = compile.check(PAK, WORKDIR=WORKDIR)
    if( CXX != None):
        print( bcolors.GREEN+"*"+bcolors.ENDC+" CXX: "+CXX)
        CXX = compile.compile(PAK, CXX, WORKDIR)
    if( CXX != None and CXX != 0 ):
        print( bcolors.RED+"[ERROR]: Couldn't compile!" )
        print( ">>> Cleaning up and returning 1!"+bcolors.ENDC )
        # clean.rm( WORKDIR+INSTDIR ) -- NO U
        clean.rm( WORKDIR+TARFILE )
        clean.rm( WORKDIR+TARFILE+".checksum" )
        return 1
    del CXX
    # --- Below this line: sha1 hash generation ---
    mOut = open("packages.checksum"+os.sep+TARFILE+".checksum", "w")
    mOut.write(m)
    mOut.close()
    del m
    os.remove(WORKDIR+TARFILE+".checksum")
    # --- Below this line: Project is now installed, write to "packages.installed" ---
    if( os.path.isfile( "packages.installed" ) == False):
        PACKINST = []
    else:
        PACKINST = open( "packages.installed", "r" ).read().split("\n")
    # Deduplicate the entry and drop blank lines before rewriting.
    while( PACKINST.count(PAK[:-5]) > 0 ):
        PACKINST.remove(PAK[:-5])
    while( PACKINST.count("\n") > 0 ):
        PACKINST.remove("\n")
    while( PACKINST.count("") > 0 ):
        PACKINST.remove("")
    PACKOUT = open( "packages.installed", "w" )
    for LINES in PACKINST:
        PACKOUT.write(LINES+"\n")
    PACKOUT.write(PAK[:-5]+"\n")
    PACKOUT.close()
    del PACKOUT
    # --- Below this line: REMOVE UNNEEDED FILES (IF THEY EXIST) ---
    CLEANUP = [WORKDIR+TARFILE, WORKDIR+TARFILE+".checksum"]
    for FILES in CLEANUP:
        if( os.path.exists(FILES) == False):
            continue
        clean.rm( FILES )
    del CLEANUP
def compile(x):
    """Parse *x*, then hand the parse tree to the backend compiler."""
    tree = parse(x)
    return c.compile(tree)
def compile(s, target="cpy3"):
    """Build *s*, then compile the result for *target* (default "cpy3")."""
    built = build(s)
    return c.compile(built, target)
#!/usr/bin/env python from sys import argv from compile import compile from table2csv import convert if __name__ == "__main__": if len(argv) > 1: if argv[1] == 'compile': compile(argv[2:]) elif len(argv) > 2: if argv[1] == 'fetch': argv.remove('fetch') fetch = __import__('_sites.' + argv[1], globals(), locals(), ['fetch']) for url in argv[2:]: title, tables = fetch.fetch(url) i = 1 for table in tables: convert(table, title + '-' + str(i), argv[1]) i += 1
""" import os, sys # Attempt to compile mpiext.so # if sys.platform in ['osf1V5']: #Compaq AlphaServer do not have mpicc (grr) MPICC = 'cc -lmpi' else: MPICC = 'mpicc' try: import compile compile.compile('mpiext.c', MPICC, verbose = 1) except Exception, e: msg = 'Could not compile C extension mpiext.c - please try manually: %s' % e raise Exception(msg) if sys.platform in ['osf1V5', 'sunos5']: #Compaq AlphaServer or Sun sys.exit() # Alpha Server or Sun cannot import MPI on sequential processes # OK with LAM #Check if MPI module can be initialised # # Attempt to import and initialise mpiext.so
import os.path import aceutils import compile import sys type = 'cs' if len(sys.argv) == 2: type = sys.argv[1] if len(sys.argv) == 3: type = sys.argv[2] compile.init() if type=='java': compile.compile('cs') compile.compile(type) aceutils.cdToScript() aceutils.cd(r'../') def editFiles(targetDir): files = [] for f in aceutils.get_files(targetDir): basename = os.path.basename(f) ext = os.path.splitext(basename)[1] if ext == '.java': if 'Recorder' in basename: aceutils.rm(f) else:
if dirname[-1] != os.sep: dirname += os.sep curdir = os.getcwd() os.chdir(dirname) cmdstring = '"import fwt_ext, ifwt_ext"' s = 'python -c %s ' %cmdstring error = os.system(s) if error: print "Trying to recompile c-extensions" #Remove any previous extensions extensions = ['fwt_ext.dll', 'ifwt_ext.dll', 'fwt_ext.so', 'ifwt_ext.so'] for ext in extensions: try: os.remove(ext) except: pass from compile import compile compile('fwt_ext.c', verbose = 1) compile('ifwt_ext.c', verbose = 1) os.chdir(curdir)