def Continue(num):
    """Create `num` module boxes via Start(), assemble them into one
    Superbox spanning `num` units along x, and emit the combined data."""
    # one modbox per requested unit
    boxes = [Start() for _ in range(num)]
    # index -> modbox mapping (kept for parity with the original flow)
    indexed = dict(zip(np.arange(num).tolist(), boxes))
    first = boxes[0]
    # superbox is num units wide in x; y/z match a single modbox
    superbox = Superbox(first.ux * num, first.uy, first.uz)
    for box in boxes:
        superbox.add_modbox(box)
    Build.build_data(superbox.atomlist, superbox)
def store(df):
    '''
    Train the model and store the results.

    Input:
        df: dataframe

    Outputs:
        normdict (dictionary): maps a variable name to its evaluation under
            normalization
        quantdict (dictionary): maps a variable name to its evaluation under
            quartilization. Note: variables that are not supported for qcut
            (too many 0 values) are avoided.
        Serious_df: evaluation dataframe for SeriousDlqin2yrs; because
            SeriousDlqin2yrs itself is a dummy, I don't apply normalization
            nor quartilization.
    '''
    normdict = {}
    quantdict = {}
    # pristine copy: `df` is restored from it before every (column, scheme) run
    temp = df.copy()
    for colname in df.columns:
        for is_norm in [True, False]:
            df = temp.copy()
            if colname != 'SeriousDlqin2yrs':
                df_disc, use_norm = Preprocess.discretize(df, colname, is_norm)
                # discretize() may fall back to the other scheme (e.g. qcut
                # unsupported) — only keep results where the requested scheme
                # was the one actually applied, so each column is evaluated
                # at most once per scheme
                if is_norm == use_norm:
                    if use_norm:
                        df_dum = Preprocess.create_dummy(df_disc, colname)
                        target_dummy = '95% CI'
                    else:
                        df_dum = Preprocess.create_dummy(df_disc, colname, \
                                                         is_norm=False)
                        target_dummy = 'upper 50%'
                    x_train, x_test, y_train, y_test = Build.split_data(df_dum,\
                                                                       target_dummy, \
                                                                       colname)
                    subdf = Build.DTclassifier(x_train, x_test, y_train, y_test)
                    if use_norm:
                        normdict[colname] = subdf
                    else:
                        quantdict[colname] = subdf
            else:
                # SeriousDlqin2yrs is already binary: evaluate it directly
                target_dummy = colname
                x_train, x_test, y_train, y_test = Build.split_data(df,\
                                                                    target_dummy, \
                                                                    colname)
                Serious_df = Build.DTclassifier(x_train, x_test, y_train, y_test)
    return normdict, quantdict, Serious_df
def start_build(self, plan_id):
    """Look up a plan by id and start a build of the matching kind."""
    plan = self.db.get_plan(plan_id)
    # plan row layout example:
    # (1, 1, 1, 'retrieve', 0, None, None, None, None, None)
    kind = plan[3]  # build kind lives at index 3
    if kind == 'retrieve':
        # TODO: setting the plan in the list as 'busy' or something
        print('Starting a retrieve build...')
        #self.db.set_build_status(id, 'building')
        self.current_builds.append(Build('retrieve', self.db, plan_id))
    elif kind == 'deploy':
        print('Starting a deployment build...')
        #self.db.set_build_status(id, 'building')
        self.current_builds.append(Build('deploy', self.db, plan_id))
def check_jni_headers(conf):
    """Locate the JNI headers and the jvm library under JAVA_HOME and
    register them in the 'JAVA' uselib store; fatal on failure."""
    if not conf.env.CC_NAME and not conf.env.CXX_NAME:
        conf.fatal('load a compiler first (gcc, g++, ..)')
    if not conf.env.JAVA_HOME:
        conf.fatal('set JAVA_HOME in the system environment')
    javaHome = conf.env['JAVA_HOME'][0]
    # a throwaway BuildContext gives us node-based globbing under JAVA_HOME
    b = Build.BuildContext()
    b.load_dirs(conf.srcdir, conf.blddir)
    dir = b.root.find_dir(conf.env.JAVA_HOME[0] + '/include')
    # jni.h and the platform-specific jni_md.h may live in different subdirs
    f = dir.ant_glob('**/(jni|jni_md).h', flat=False)
    incDirs = [x.parent.abspath() for x in f]
    dir = b.root.find_dir(conf.env.JAVA_HOME[0])
    f = dir.ant_glob('**/*jvm.(so|dll)', flat=False)
    # fall back to JAVA_HOME itself when no jvm library was globbed
    libDirs = [x.parent.abspath() for x in f] or [javaHome]
    # try each candidate lib dir until one check succeeds
    for i, d in enumerate(libDirs):
        if conf.check(header_name='jni.h', define_name='HAVE_JNI_H', lib='jvm',
                libpath=d, includes=incDirs, uselib_store='JAVA', uselib='JAVA'):
            break
    else:
        conf.fatal('could not find lib jvm in %r (see config.log)' % libDirs)
def setUp(self):
    '''setup the foundations needed for tests'''
    self._bld = Build.BuildContext()
    # a fresh scratch directory per test keeps runs isolated and repeatable
    root = tempfile.mkdtemp("", ".waf-testing_")
    self._test_dir_root = root
    self._wscript_file_path = os.path.join(root, WSCRIPT_FILE)
    os.chdir(root)
def make_bld(self):
    """Create a fresh BuildContext rooted at the test dir, with a 'b' build
    subdirectory and a default environment; returns the context."""
    Options.commands['configure'] = False
    # also publish the context as Build.bld, as waf expects
    bld = Build.bld = Build.BuildContext()
    bld.set_env('default', Environment.Environment())
    bld.load_dirs(self._test_dir_root, os.path.join(self._test_dir_root, 'b'))
    return bld
def main(configfilename): try: config = Build._load_data(configfilename) print 'I: read old config from', configfilename except IOError, SyntaxError: # IOerror: file not present/readable # SyntaxError: invalid file (platform change?) # if not set by Make.py we can assume Windows config = {'useELFEXE': 1}
def Start(inputfile='PVDF-model.cif', size=2000, dm=15, mv=[0, 0]):
    """Build a polymer supercell from a CIF file, add plasticizer molecules,
    and write POSCAR files for the polymer, plasticizer and combined system.

    NOTE(review): mutable default `mv=[0, 0]` is shared across calls — safe
    only if divide_box() never mutates it; confirm.
    """
    size = size / 2  # presumably the per-component atom budget — TODO confirm
    box, atomlist = Init.Split_file(inputfile)
    # repetitions along y and z so the supercell holds roughly `size` atoms
    num = int(pow(size / len(atomlist.value), 1 / 2) + 1)
    print('Now Generating a Supercell of ', 1, '*', num, '*', num, '\n')
    print('######## Now Initializing Supercell ########\n')
    modbox = Modbox(box.x, box.y, box.z, num, dm)
    modbox.divide_box(mv)
    modbox.add_atomlist()
    #print('Though required size was ',size,', in total we make ',len(modbox.atomlist.value),' atoms\n')
    print('######## Now Generating Plasticizer ########\n')
    # number of plasticizer molecules derived from the polymer atom list
    numplast = Dummy.Calc_dummy(modbox.atomlist1)
    Add_plastic(modbox, numplast)
    modbox.add_atomlist()
    print('Though required size was ', size * 2, ', in total we make ',
          len(modbox.atomlist1.value + modbox.atomlist2.value),
          ' atoms, including ', len(modbox.atomlist2.value),
          ' atoms from plasticizers\n')
    # report the achieved plasticizer mass fraction (percent)
    mass1 = Dummy.Calc_mass(modbox.atomlist1)
    mass2 = Dummy.Calc_mass(modbox.atomlist2)
    print('The actual Mass Ratio is ', mass2 / (mass1 + mass2) * 100, '\n')
    # three POSCAR outputs: polymer only, plasticizer only, and the union
    Build.build_poscar(modbox.atomlist1, modbox, filename='Polymer')
    Build.build_poscar(modbox.atomlist2, modbox, filename='Plastic')
    Build.build_poscar(Init.Atomlist(modbox.atomlist1.value + modbox.atomlist2.value),
                       modbox, filename='Combine')
def __init__(self, sim_name, mod_name, sim_params, **kwargs):
    """Ode simulation wrapper: forces a per-simulation .od3 source file and
    the interpreter backend, then pre-generates the simulation script."""
    # update the src name with the default if not given
    # kwargs['src_name'] = kwargs.get('src_name', sim_name + ".od3")
    # NOTE - each Ode simulation must have it's own src file (use symlinks if req'd)
    kwargs['src_name'] = sim_name + ".od3"
    kwargs['is_exe'] = False
    super().__init__(sim_name, **kwargs)
    self.startup_offset = 1.0  # on average for comp. time
    self.mod_name = mod_name
    self.sim_params = sim_params
    # NOTE: mutates the caller's dict — backend is always the interpreter
    self.sim_params['backend'] = 'interpreter'
    self.script = Build._gen_sim_script(self.sim_name, self.mod_name,
                                        self.sim_params, True)
def generate_and_build():
    """
    Run generation then build, stopping at the first failure.
    Returns 0 on success, the failing step's non-0 return value on failure.
    """
    for step_name, step in (("Generate", Generate.generate), ("Build", Build.build)):
        result = step()
        if result != 0:
            print("{} failed with return value '{}'".format(step_name, result))
            return result
    return 0
def main(parser): opts, args = parser.parse_args() if not args: parser.error('Requires at least one scriptname file or exactly one .spec-file') # Skip configuring when using the same python as specified in config.dat try: config = Build._load_data(opts.configfile) if config['pythonVersion'] == sys.version: print 'I: skip Configure.py, use existing config', opts.configfile else: run_configure(opts, args) except IOError, SyntaxError: run_configure(opts, args)
def main(parser): opts, args = parser.parse_args() if not args: parser.error( 'Requires at least one scriptname file or exactly one .spec-file') # Skip configuring when using the same python as specified in config.dat try: config = Build._load_data(opts.configfile) if config['pythonVersion'] == sys.version: print 'I: skip Configure.py, use existing config', opts.configfile else: run_configure(opts, args) except IOError, SyntaxError: run_configure(opts, args)
def __init__(self, db):
    '''
    General description:
    This function initializes the database variables and \
    index to refer in functions.
    '''
    DBUtil.__init__(self, db)
    self.collection = db.State
    # DAOs for related collections used by this class's methods
    self.deploymentunitapprovalstatusdb = DeploymentUnitApprovalStatus.DeploymentUnitApprovalStatus(
    )
    self.DeploymentFieldsdb = DeploymentFields.DeploymentFields(db)
    self.buildDb = Build.Build()
    # indexes: a state name must be unique within its parent entity
    self.collection.create_index([('name', ASCENDING),
                                  ('parent_entity_id', ASCENDING)],
                                 unique=True)
def test_white_no_sources_specified(self):
    # white-box test: no sources were specified
    # add apply_verif to taskgen
    import Tools.ccroot
    Options.commands['configure'] = False
    env = Environment.Environment()
    # publish the context as Build.bld, as waf expects
    bld = Build.bld = Build.BuildContext()
    bld.set_env('default', env)
    blddir = os.path.join(self._test_dir_root, 'b')
    bld.load_dirs(self._test_dir_root, blddir)
    obj = TaskGen.task_gen(bld=bld)
    # TODO: make sure it works with apply_core too
    # a task generator created with no sources must fail verification
    self.failUnlessRaises(Utils.WafError, obj.apply_verif)
def __init__(self, db):
    '''
    General description:
    This function initializes the database variables and \
    index to refer in functions.
    '''
    DBUtil.__init__(self, db)
    self.collection = db.Versions
    # DAOs for related collections used by this class's methods
    self.deploymentFieldsDB = DeploymentFields.DeploymentFields(db)
    self.buildDB = Build.Build()
    self.documentsDB = Documents.Documents(db)
    self.mediaFilesDB = MediaFiles.MediaFiles(db)
    # indexes: (tool, version name, version number) triples are unique
    self.collection.create_index([('tool_id', ASCENDING),
                                  ('version_name', ASCENDING),
                                  ('version_number', ASCENDING)],
                                 unique=True)
def test_incorrect_version(self):
    """Loading a build cache written by an older waf version must raise
    Utils.WafError."""
    # white-box test: configured with old version
    Options.commands['configure'] = False
    bld = Build.BuildContext()
    bld.blddir = os.path.join(self._test_dir_root, 'b')
    # this will create the cachedir...
    self.failUnlessRaises(Utils.WafError, bld.load_dirs, bld.blddir, bld.blddir)
    os.makedirs(bld.cachedir)
    # create build cache file with OLD version
    cachefile = os.path.join(bld.cachedir, 'build.config.py')
    # FIX: use a context manager so the handle is always closed, and stop
    # shadowing the `file` builtin
    with open(cachefile, 'w') as cache:
        cache.write("version = 0.0")
    self.failUnlessRaises(Utils.WafError, bld.load)
def CHECK_NEED_LC(conf, msg):
    '''check if we need -lc

    Compiles a tiny shared lib that calls fopen(); returns True when the
    build succeeds without explicitly linking libc, False otherwise.
    '''
    config_dir = find_config_dir(conf)  # renamed: `dir` shadowed the builtin
    env = conf.env
    bdir = os.path.join(config_dir, 'testbuild2')
    if not os.path.exists(bdir):
        os.makedirs(bdir)
    subdir = os.path.join(config_dir, "liblctest")
    os.makedirs(subdir)
    # FIX: write the probe source with a context manager so the handle is
    # closed even if the write fails
    with open(os.path.join(subdir, 'liblc1.c'), 'w') as dest:
        dest.write(
            '#include <stdio.h>\nint lib_func(void) { FILE *f = fopen("foo", "r");}\n'
        )
    bld = Build.BuildContext()
    bld.log = conf.log
    bld.all_envs.update(conf.all_envs)
    bld.all_envs['default'] = env
    bld.lst_variants = bld.all_envs.keys()
    bld.load_dirs(config_dir, bdir)
    bld.rescan(bld.srcnode)
    bld(features='cc cshlib',
        source='liblctest/liblc1.c',
        ldflags=conf.env['EXTRA_LDFLAGS'],
        target='liblc',
        name='liblc')
    try:
        bld.compile()
        conf.check_message(msg, '', True)
        return True
    except Exception:
        # FIX: narrowed from bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit); any build failure means "no"
        conf.check_message(msg, '', False)
        return False
def __init__(self):
    '''
    General description:
    This function initializes the database variables and \
    index to refer in functions.
    '''
    # uses the module-level `mongodb` handle rather than an injected db
    db = mongodb
    DBUtil.__init__(self, db)
    self.collection = db.DeploymentUnit
    # DAOs for related collections used by this class's methods
    self.deploymentUnitApprovalStatusDB = DeploymentUnitApprovalStatus.DeploymentUnitApprovalStatus(
    )
    self.tagDB = Tags.Tags()
    self.buildDB = Build.Build()
    self.deploymentFieldsDB = DeploymentFields.DeploymentFields(db)
    self.deploymentUnitTypeDB = DeploymentUnitType.DeploymentUnitType()
    self.statedb = State.State(db)
    # self.deploymentUnitSetDB = DeploymentUnitSet.DeploymentUnitSet()
    # indexes: deployment unit names are globally unique
    self.collection.create_index([('name', ASCENDING)], unique=True)
def find_PYZ_dependencies(config):
    """Analyze the 'archive' support module and record the pure-Python
    modules it pulls in as config['PYZ_dependencies']."""
    print "I: computing PYZ dependencies..."
    a = mf.ImportTracker([os.path.join(HOME, 'support')])
    a.analyze_r('archive')
    mod = a.modules['archive']
    toc = Build.TOC([(mod.__name__, mod.__file__, 'PYMODULE')])
    # NOTE(review): range(len(toc)) is fixed before the loop starts, so
    # entries appended by toc.extend below are NOT themselves scanned —
    # confirm this one-level expansion is intended.
    for i in range(len(toc)):
        nm, fnm, typ = toc[i]
        mod = a.modules[nm]
        tmp = []
        # unconditional imports only; delayed flag is ignored here
        for importednm, isdelayed, isconditional, level in mod.imports:
            if not isconditional:
                realnms = a.analyze_one(importednm, nm)
                for realnm in realnms:
                    imported = a.modules[realnm]
                    # builtins need no bundling
                    if not isinstance(imported, mf.BuiltinModule):
                        tmp.append((imported.__name__, imported.__file__,
                                    imported.typ))
        toc.extend(tmp)
    # dependencies first, roots last
    toc.reverse()
    config['PYZ_dependencies'] = toc.data
def process_man(self):
    """For every manpage listed in self.files, create a gzip-copy task that
    installs under ${MANDIR}/man<section> (section defaults to '1')."""
    files = getattr(self, 'files', None)
    if not files:
        return
    section = getattr(self, 'section', '1')
    for name in self.to_list(files):
        node = self.path.find_resource(name)
        if not node:
            raise Build.BuildError('cannot find input file %s for processing' % name)
        # computed for parity with the original flow (not used below)
        target = self.target or node.name
        out = self.path.find_or_declare(name + '.gz')
        tsk = self.create_task('copy')
        tsk.set_inputs(node)
        tsk.set_outputs(out)
        tsk.fun = gzip_func
        tsk.install_path = '${MANDIR}/man' + section
        tsk.color = 'BLUE'
def run_build(opts, args, spec_file):
    """Hand the parsed options over to the Build module and run it."""
    # Build reads opts/args as module-level globals, so set them first
    Build.opts = opts
    Build.args = args
    Build.main(spec_file, configfilename=opts.configfile)
def build(self):
    """Delegate to the Build module using this simulation's settings."""
    # just a wrapper around the build module
    Build.build(self.sim_name, self.mod_name, self.sim_params, self.log_prefix)
# NOTE(review): fragment — this begins inside an if/elif chain dispatching on
# `task`; the opening branch and the trailing py2exe branch are outside this view.
    f = Freezer(targetdir, excludes=excludes)
    f.include_py = False
    f.addScript("run.py")
    f()  # starts the freezing process
elif task == 'PyInstaller':
    sys.path.append('/home/jorn/pyinstaller-1.3')
    import Build
    specnm = 'run'
    # PyInstaller working paths: spec dir, warnings file, build scratch dir
    Build.SPECPATH = os.getcwd()
    Build.WARNFILE = os.path.join(Build.SPECPATH, 'warn%s.txt' % specnm)
    Build.BUILDPATH = os.path.join(Build.SPECPATH, 'build%s' % specnm)
    if not os.path.exists(Build.BUILDPATH):
        os.mkdir(Build.BUILDPATH)
    # Analysis pulls in PyInstaller's bootstrap scripts plus the entry script
    a = Build.Analysis([
        os.path.join(Build.HOMEPATH, 'support/_mountzlib.py'),
        os.path.join(Build.HOMEPATH, 'support/useUnicode.py'),
        os.path.abspath('./run.py')
    ],
                       pathex=['/home/jorn/pyinstaller-1.3'])
    # drop the excluded modules from the pure-Python archive
    pyz = Build.PYZ(a.pure - [(mn, '', '') for mn in excludes])
    exe = Build.EXE(pyz,
                    a.scripts,
                    exclude_binaries=1,
                    name='buildrun/run',
                    debug=False,
                    strip=False,
                    upx=False,
                    console=1)
    coll = Build.COLLECT(exe, a.binaries, strip=False, upx=False, name='dist')
else:
    # Use py2exe to freeze the client.
# Game bootstrap: register image/sound assets, initialize subsystems,
# and position the map camera.
ImageUtil.create_image("temp", "res/tile/tile-template.png")
ImageUtil.create_image("landfill", "res/tile/tile-landfill.png")
ImageUtil.create_image("truck1-TL", "res/trucks/truck1/truck1TL.png")
ImageUtil.create_image("truck1-TR", "res/trucks/truck1/truck1TR.png")
ImageUtil.create_image("truck1-BL", "res/trucks/truck1/truck1BL.png")
ImageUtil.create_image("truck1-BR", "res/trucks/truck1/truck1BR.png")
ImageUtil.create_image("menu-name-white", "res/mainmenu/name-white.png")
ImageUtil.create_image("menu-name-black", "res/mainmenu/name-black.png")
ImageUtil.create_image("menu-back", "res/mainmenu/menu-back.png")
ImageUtil.create_image("incinerator", "res/tile/tile-incinerator.png")
ImageUtil.create_image("recycler", "res/tile/tile-recycler.png")
ImageUtil.create_image("blackhole", "res/tile/tile-blackholefacility.png")
SoundUtil.create_sound("click", "res/sound/Click.ogg")
Build.init()
GameState = GAME_STATE_MENU

def startGame():
    # menu callback: flips the global state machine into the game proper
    global GameState
    GameState = GAME_STATE_GAME

aMainMenu.init(screen, startGame)
Map.loadMap()
Map.loadPathFindingMap()
# initial camera offset
Map.scrollX = 1000
Map.scrollY = -200
def main():
    """
    Main progression of the program
    :return: 0 if success

    NOTE(review): this body was reconstructed from a whitespace-mangled
    source; nesting of the grading branch should be verified against the
    original file. A string literal broken at a fragment boundary
    ("Error or warning while build. ... Would you like to continue?")
    was rejoined.
    """
    parser, args = init()
    if args.pull:
        print("Updating Student Repositories:")
        grade_list = get_student_directories()[0]
        print("Loaded {} student repositories\n".format(len(grade_list)))
        # Reference to move program back to top level after finishing work on a student
        top_level = os.getcwd()
        # Iterate through the list of folders identified as student folders
        for student in grade_list:
            os.chdir("{}/{}".format(top_level, student))  # Go into the student's directory
            GitFunction.pull()
            os.chdir(top_level)
    elif args.reset:
        print("Resetting Student Repositories:")
        grade_list = get_student_directories()[0]
        print("Loaded {} student repositories\n".format(len(grade_list)))
        # Reference to move program back to top level after finishing work on a student
        top_level = os.getcwd()
        # Iterate through the list of folders identified as student folders
        for student in grade_list:
            os.chdir("{}/{}".format(top_level, student))  # Go into the student's directory
            GitFunction.reset()
            os.chdir(top_level)
    elif args.grade is not None:
        # Setup grading process
        config_file, config_location = Config.setup_config(args)
        # Generate list of student directories to be graded
        grade_list, excluded = get_student_directories(start=args.student)
        print("Starting Grading at: {}".format(grade_list[0]))
        print("Excluded: ", end="")
        print_array_of_strings(excluded)
        print("Loaded {} student repositories\n".format(len(grade_list)))
        # Reference to move program back to top level after finishing work on a student
        top_level = os.getcwd()
        # Iterate through the list of folders identified as student folders
        for student in grade_list:
            # Start the grading for a new student
            print("-------------------------------------------------------------")
            print("Grading:{} for Assignment:{}\n".format(student, config_file.dir))
            os.chdir("{}/{}".format(top_level, student))  # Go into the student's directory
            # Reset and then Update the student repository
            GitFunction.reset()
            GitFunction.pull()
            # Move into the assignment's directory
            if cd_into_assignment(top_level=top_level, student=student, config=config_file):
                checkout = False
                # Git Log information
                if GitFunction.log(config=config_file):
                    if yes_no_question("Checkout to another commit?", y_default=False):
                        checkout = GitFunction.checkout(input("Hash:"))
                # Build student source (if needed)
                if config_file.build is not None:
                    if Build.confirm_files(config=config_file):
                        ready_for_build = True
                    else:
                        print("Directory Contains:")
                        print_array_of_strings(os.listdir(os.getcwd()))
                        ready_for_build = yes_no_question("\nMissing required files. Continue with build?",
                                                          y_default=False)
                if config_file.build is not None and ready_for_build:
                    # Prepare assignment folder by moving support files
                    move_support_files(config_file, config_location, os.getcwd())
                    built = Build.build(config_file)
                else:
                    built = True
                    if not Build.confirm_files(config=config_file):
                        print("Directory Contains:")
                        print_array_of_strings(os.listdir(os.getcwd()))
                        yes_no_question("\nMissing required files. Continue?", y_default=False)
                    move_support_files(config_file, config_location, os.getcwd())
                if not built:
                    built = yes_no_question("\nError or warning while build. Would you like to continue?",
                                            y_default=False)
                if built:
                    if len(config_file.diff_actions) != 0:
                        # Diff Testing
                        if yes_no_question("\nExecute to Diff Tests?"):
                            execute_testing("diff", config_file.diff_actions, config_location)
                    if len(config_file.unit_actions) != 0:
                        # Unit Testing
                        if yes_no_question("\nExecute to Unit Tests?"):
                            execute_testing("unit", config_file.unit_actions)
                    if len(config_file.bash_actions) != 0:
                        # Extra bash commands
                        if yes_no_question("\nExecute Additional Bash?"):
                            execute_testing("bash", config_file.bash_actions)
                    if yes_no_question("\nView source files?"):
                        view_source(config_file)
                # Restore repository
                os.chdir("{}/{}".format(top_level, student))  # Go into the student's directory
                GitFunction.reset(checkout_executed=checkout)
            # Go back to top level & proceed to next student
            os.chdir(top_level)
            if yes_no_question("\nContinue to next student"):
                print("")
            else:
                break
    else:
        parser.print_help()
        print("\n")
    return 0
def run_c_code(self, *k, **kw):
    """waf configuration helper: compile (and optionally execute) a C test
    snippet in a throwaway folder.

    Returns the program's stdout when kw['execute'] is set, else 0 on a
    successful compile; calls self.fatal() on any failure.
    """
    test_f_name = kw['compile_filename']
    # NOTE: rebinds the *k parameter as a loop counter (kept as-is)
    k = 0
    while k < 10000:
        # make certain to use a fresh folder - necessary for win32
        dir = os.path.join(self.blddir, '.conf_check_%d' % k)
        # if the folder already exists, remove it
        try:
            shutil.rmtree(dir)
        except OSError:
            pass
        try:
            os.stat(dir)
        except OSError:
            break
        k += 1
    try:
        os.makedirs(dir)
    except:
        self.fatal('cannot create a configuration test folder %r' % dir)
    try:
        os.stat(dir)
    except:
        self.fatal('cannot use the configuration test folder %r' % dir)
    bdir = os.path.join(dir, 'testbuild')
    if not os.path.exists(bdir):
        os.makedirs(bdir)
    env = kw['env']
    # write the snippet to be compiled
    dest = open(os.path.join(dir, test_f_name), 'w')
    dest.write(kw['code'])
    dest.close()
    back = os.path.abspath('.')
    bld = Build.BuildContext()
    bld.log = self.log
    bld.all_envs.update(self.all_envs)
    bld.all_envs['default'] = env
    bld.lst_variants = bld.all_envs.keys()
    bld.load_dirs(dir, bdir)
    os.chdir(dir)
    bld.rescan(bld.srcnode)
    if not 'features' in kw:
        # conf.check(features='cc cprogram pyext', ...)
        kw['features'] = [kw['compile_mode'], kw['type']]  # "cprogram cc"
    o = bld(features=kw['features'], source=test_f_name, target='testprog')
    # forward every keyword to the task generator
    for k, v in kw.iteritems():
        setattr(o, k, v)
    self.log.write("==>\n%s\n<==\n" % kw['code'])
    # compile the program
    try:
        bld.compile()
    except Utils.WafError:
        ret = Utils.ex_stack()
    else:
        ret = 0
    # chdir before returning
    os.chdir(back)
    if ret:
        self.log.write('command returned %r' % ret)
        self.fatal(str(ret))
    # if we need to run the program, try to get its result
    # keep the name of the program to execute
    if kw['execute']:
        lastprog = o.link_task.outputs[0].abspath(env)
        args = Utils.to_list(kw.get('exec_args', []))
        proc = Utils.pproc.Popen([lastprog] + args,
                                 stdout=Utils.pproc.PIPE,
                                 stderr=Utils.pproc.PIPE)
        (out, err) = proc.communicate()
        w = self.log.write
        w(str(out))
        w('\n')
        w(str(err))
        w('\n')
        w('returncode %r' % proc.returncode)
        w('\n')
        if proc.returncode:
            self.fatal(Utils.ex_stack())
        ret = out
    return ret
def loadBridge(bridgeID, world):
    """Load a saved bridge into the Box2D `world`: create the two dirt
    ground bodies, side walls, material bodies and joints.

    Returns (materialStack, jointList, dif, land).
    """
    materialStack, jointList, jointNum, dif, land = Build.loadBridge(bridgeID)
    Dirt1Y, Dirt1width, Dirt1height, Dirt2X, Dirt2Y, Dirt2width, Dirt2height = Graphics.dirtSize(
        dif, land)
    # convert screen coordinates to world units (divide by 20) and flip y
    # around height 30 — NOTE(review): the 20 px/unit and 30-unit-height
    # constants are assumed from this code; confirm against Graphics.
    Dirt1Y = 30 - Dirt1Y / 20
    Dirt1width = Dirt1width / 20
    Dirt1height = Dirt1height / 20
    Dirt2X = Dirt2X / 20
    Dirt2Y = 30 - Dirt2Y / 20
    Dirt2width = Dirt2width / 20
    Dirt2height = Dirt2height / 20
    # joint anchor points get the same conversion
    for joint in jointList:
        convx, convy = joint['point']
        joint['point'] = (convx / 20, 30 - convy / 20)
    # left dirt block
    GB1fix = b2FixtureDef(shape=b2PolygonShape(box=(Dirt1width / 2, Dirt1height / 2)),
                          friction=0.2, categoryBits=0x0002, maskBits=0x0004)
    groundBody1 = world.CreateStaticBody(position=(Dirt1width / 2, Dirt1height / 2),
                                         fixtures=GB1fix)
    # right dirt block (anchored to the 50-unit-wide world's right edge)
    GB2fix = b2FixtureDef(shape=b2PolygonShape(box=(Dirt2width / 2, Dirt2height / 2)),
                          friction=0.2, categoryBits=0x0002, maskBits=0x0004)
    groundBody2 = world.CreateStaticBody(position=(50 - Dirt2width / 2, Dirt2height / 2),
                                         fixtures=GB2fix)
    # invisible walls just outside the 0..50 play area
    wallfix = b2FixtureDef(shape=b2PolygonShape(box=(1, 15)),
                           friction=0.2, categoryBits=0x0002, maskBits=0x0004)
    wall1 = world.CreateStaticBody(position=(-1, 15), fixtures=wallfix)
    wall2 = world.CreateStaticBody(position=(51, 15), fixtures=wallfix)
    for material in materialStack:
        material.createBody(world)
    for joint in jointList:
        # gather the materials attached at this joint
        materialAboutJoint = []
        for material in materialStack:
            if joint['index'] == material.getJoint1(
            ) or joint['index'] == material.getJoint2():
                materialAboutJoint.append(material)
        joint['materials'] = materialAboutJoint
        jointx, jointy = joint['point']
        # classify whether the joint rests on the left dirt, right dirt, or neither
        if (jointx == Dirt1width and jointy <= Dirt1Y) or (jointx <= Dirt1width
                                                           and jointy == Dirt1Y):
            joint['onGround1'] = True
            joint['onGround2'] = False
        elif (jointx == Dirt2X and jointy <= Dirt2Y) or (jointx >= Dirt2X
                                                         and jointy == Dirt2Y):
            joint['onGround1'] = False
            joint['onGround2'] = True
        else:
            joint['onGround1'] = False
            joint['onGround2'] = False
        createJoints(joint, world, groundBody1, groundBody2)
    return (materialStack, jointList, dif, land)
config = {'useELFEXE': 1} # Save Python version, to detect and avoid conflicts config["pythonVersion"] = sys.version config["pythonDebug"] = __debug__ find_EXE_dependencies(config) test_TCL_TK(config) test_Zlib(config) test_Crypt(config) test_RsrcUpdate(config) test_unicode(config) test_UPX(config) find_PYZ_dependencies(config) Build._save_data(configfilename, config) print "I: done generating", configfilename if __name__ == '__main__': from pyi_optparse import OptionParser parser = OptionParser(usage="%prog [options]") parser.add_option('--target-platform', default=None, help='Target platform, required for cross-bundling ' '(default: current platform).') parser.add_option('--upx-dir', default=None, help='Directory containing UPX.') parser.add_option('--executable', default=None, help='Python executable to use. Required for ' 'cross-bundling.') parser.add_option('-C', '--configfile',
def CHECK_LIBRARY_SUPPORT(conf, rpath=False, version_script=False, msg=None):
    '''see if the platform supports building libraries

    Builds a tiny shared lib plus a program linked against it, runs the
    program (optionally via rpath or with a linker version script), and
    returns True iff it exits 0.
    '''
    if msg is None:
        if rpath:
            msg = "rpath library support"
        else:
            msg = "building library support"
    dir = find_config_dir(conf)
    bdir = os.path.join(dir, 'testbuild')
    if not os.path.exists(bdir):
        os.makedirs(bdir)
    env = conf.env
    subdir = os.path.join(dir, "libdir")
    os.makedirs(subdir)
    # probe sources: a lib returning 42 and a main() checking for 42
    dest = open(os.path.join(subdir, 'lib1.c'), 'w')
    dest.write('int lib_func(void) { return 42; }\n')
    dest.close()
    dest = open(os.path.join(dir, 'main.c'), 'w')
    dest.write('int main(void) {return !(lib_func() == 42);}\n')
    dest.close()
    bld = Build.BuildContext()
    bld.log = conf.log
    bld.all_envs.update(conf.all_envs)
    bld.all_envs['default'] = env
    bld.lst_variants = bld.all_envs.keys()
    bld.load_dirs(dir, bdir)
    bld.rescan(bld.srcnode)
    ldflags = []
    if version_script:
        ldflags.append("-Wl,--version-script=%s/vscript" % bld.path.abspath())
        dest = open(os.path.join(dir, 'vscript'), 'w')
        dest.write('TEST_1.0A2 { global: *; };\n')
        dest.close()
    bld(features='cc cshlib',
        source='libdir/lib1.c',
        target='libdir/lib1',
        ldflags=ldflags,
        name='lib1')
    o = bld(features='cc cprogram',
            source='main.c',
            target='prog1',
            uselib_local='lib1')
    if rpath:
        o.rpath = os.path.join(bdir, 'default/libdir')
    # compile the program
    try:
        bld.compile()
    except:
        conf.check_message(msg, '', False)
        return False
    # path for execution
    lastprog = o.link_task.outputs[0].abspath(env)
    if not rpath:
        # without rpath the dynamic linker needs LD_LIBRARY_PATH; remember
        # the old value so it can be restored after the probe run
        if 'LD_LIBRARY_PATH' in os.environ:
            old_ld_library_path = os.environ['LD_LIBRARY_PATH']
        else:
            old_ld_library_path = None
        ADD_LD_LIBRARY_PATH(os.path.join(bdir, 'default/libdir'))
    # we need to run the program, try to get its result
    args = conf.SAMBA_CROSS_ARGS(msg=msg)
    proc = Utils.pproc.Popen([lastprog] + args,
                             stdout=Utils.pproc.PIPE,
                             stderr=Utils.pproc.PIPE)
    (out, err) = proc.communicate()
    w = conf.log.write
    w(str(out))
    w('\n')
    w(str(err))
    w('\nreturncode %r\n' % proc.returncode)
    ret = (proc.returncode == 0)
    if not rpath:
        # restore the caller's dynamic-linker search path
        os.environ['LD_LIBRARY_PATH'] = old_ld_library_path or ''
    conf.check_message(msg, '', ret)
    return ret
content = f.readlines() # you may also want to remove whitespace characters like `\n` at the end of each line content = [x.strip() for x in content] index = words.index('-m') methods=methods.split(',') print(methods) trees={} if(("SF" in methods)or (" SF" in methods)): trees_structure,overlap_per,inconsist_per=SF.main(file_name,2) trees["SuperFine"]=trees_structure #print(trees_structure) #trees_structure.show() if("Build" in methods): tree_Build=Build.main(file_name,1) trees["Build"]=tree_Build if("GSCM" in methods): tree_scm=GSCM.main(file_name,"Common") trees["GSCM"]=tree_scm #tree_scm.show() if("TMC" in methods): tree_TMC,overlap_per1,inconsist_per1,triplets_dict1=TMC.main(file_name,2) trees["Triplet MaxCut"]=tree_TMC #tree_TMC.show() if("PhySIC" in methods): tree_PhySIC=PhySIC.main(file_name,1) trees["PhySIC"]=tree_PhySIC #tree_PhySIC.show() if("-ps"in ans):
# NOTE(review): fragment from inside a try: block — PTDF data generation for
# one execution entry `e`; the enclosing loop/try header is outside this view.
    app = Application(e['appName'])
    resIdx.addResource(app)
    execName = e['execName']
    if verbose:
        print "now processing files for execution:%s" % execName
    concurrency = e['concurrency']
    # threaded if the execution uses pthreads or OpenMP
    if (concurrency.find("PTHREADS") >= 0) or \
       (concurrency.find("OPENMP") >= 0):
        threaded = True
    else:
        threaded = False
    buildFile = eInfo.dataDir + "/" + execName + ".bld"
    # creates build and execution resources
    Build.getBuildInfo(resIdx, buildFile, execName, ptds)
    runFile = eInfo.dataDir + "/" + execName + ".run"
    # uses build and execution resources in resIdx
    Run.getRunInfo(resIdx, runFile, eInfo, ptds)
    # exactly one execution resource is expected at this point
    [execution] = resIdx.findResourcesByType("execution")
    app.addExecution(execution)
    parsePerf.getPerfInfo(resIdx, execName, \
                          eInfo.dataDir, eInfo.perfTools, \
                          threaded, ptds)
    writeLst = resIdx.PTdF()
    write_files(eInfo.dataDir, execName, writeLst, opt.split)
    print "PTDF execution data generation complete."
except PTexception, a:
def run_c_code(self, *k, **kw):
    """Compressed waf variant of run_c_code: compile a C snippet in a fresh
    scratch folder; when kw['execute'] is set, run it and capture its
    stripped stdout into `ret`.

    NOTE(review): fragment may continue past this view (no return visible).
    """
    test_f_name = kw['compile_filename']
    # NOTE: rebinds the *k parameter as a loop counter (kept as-is)
    k = 0
    # probe for an unused .conf_check_N folder (fresh dir needed on win32)
    while k < 10000:
        dir = os.path.join(self.blddir, '.conf_check_%d' % k)
        try:
            shutil.rmtree(dir)
        except OSError:
            pass
        try:
            os.stat(dir)
        except OSError:
            break
        k += 1
    try:
        os.makedirs(dir)
    except:
        self.fatal('cannot create a configuration test folder %r' % dir)
    try:
        os.stat(dir)
    except:
        self.fatal('cannot use the configuration test folder %r' % dir)
    bdir = os.path.join(dir, 'testbuild')
    if not os.path.exists(bdir):
        os.makedirs(bdir)
    env = kw['env']
    # write the snippet to be compiled
    dest = open(os.path.join(dir, test_f_name), 'w')
    dest.write(kw['code'])
    dest.close()
    back = os.path.abspath('.')
    bld = Build.BuildContext()
    bld.log = self.log
    bld.all_envs.update(self.all_envs)
    bld.all_envs['default'] = env
    bld.lst_variants = bld.all_envs.keys()
    bld.load_dirs(dir, bdir)
    os.chdir(dir)
    bld.rescan(bld.srcnode)
    o = bld(features=[kw['compile_mode'], kw['type']], source=test_f_name, target='testprog')
    # forward every keyword to the task generator
    for k, v in kw.iteritems():
        setattr(o, k, v)
    self.log.write("==>\n%s\n<==\n" % kw['code'])
    try:
        bld.compile()
    except Utils.WafError:
        ret = Utils.ex_stack()
    else:
        ret = 0
    os.chdir(back)
    if ret:
        self.log.write('command returned %r' % ret)
        self.fatal(str(ret))
    if kw['execute']:
        lastprog = o.link_task.outputs[0].abspath(env)
        if kw['execute']:  # redundant re-check, kept as-is
            args = Utils.to_list(kw.get('exec_args', []))
            try:
                data = Utils.cmd_output([lastprog] + args).strip()
            except ValueError, e:
                self.fatal(Utils.ex_stack())
            ret = data
config = {'useELFEXE': 1} # Save Python version, to detect and avoid conflicts config["pythonVersion"] = sys.version config["pythonDebug"] = __debug__ find_EXE_dependencies(config) test_TCL_TK(config) test_Zlib(config) test_Crypt(config) test_RsrcUpdate(config) test_unicode(config) test_UPX(config) find_PYZ_dependencies(config) Build._save_data(configfilename, config) print "I: done generating", configfilename if __name__ == '__main__': from pyi_optparse import OptionParser parser = OptionParser(usage="%prog [options]") parser.add_option('--target-platform', default=None, help='Target platform, required for cross-bundling ' '(default: current platform).') parser.add_option('--upx-dir', default=None, help='Directory containing UPX.') parser.add_option('--executable', default=None,
config['hasUPX'] = hasUPX # now write out config, so Build can load outf = open(configfile, 'w') import pprint pprint.pprint(config, outf) outf.close() import Build # PYZ_dependencies print "I: computing PYZ dependencies..." a = mf.ImportTracker([os.path.join(HOME, 'support')]) a.analyze_r('archive') mod = a.modules['archive'] toc = Build.TOC([(mod.__name__, mod.__file__, 'PYMODULE')]) for i in range(len(toc)): nm, fnm, typ = toc[i] mod = a.modules[nm] tmp = [] for importednm, isdelayed, isconditional in mod.imports: if not isconditional: realnms = a.analyze_one(importednm, nm) for realnm in realnms: imported = a.modules[realnm] if not isinstance(imported, mf.BuiltinModule): tmp.append((imported.__name__, imported.__file__, imported.typ)) toc.extend(tmp) toc.reverse() config['PYZ_dependencies'] = toc.data
# stdlib imports plus PyInstaller's own helper modules
import shutil
import pprint
import re
import glob
import mf
import bindepend
import Build

HOME = os.path.dirname(sys.argv[0])
iswin = sys.platform[:3] == 'win'
# running on Python >= 2.4?
is24 = hasattr(sys, "version_info") and sys.version_info[:2] >= (2, 4)
cygwin = sys.platform == 'cygwin'

# 64-bit OS X Python is unsupported by this PyInstaller version — bail early
if sys.platform == 'darwin' and Build.architecture() == '64bit':
    print "ERROR: PyInstaller does not support Python 64-bit on Mac OSX"
    print "Try using the 32-bit version of Python, by setting"
    print "VERSIONER_PYTHON_PREFER_32_BIT=yes in the environment"
    print "or run Python as 32-bit binary by command:"
    print ""
    print "arch -i386 python"
    sys.exit(2)

def find_EXE_dependencies(config):
    # NOTE(review): this definition continues past this view — incomplete here
    global target_platform, target_iswin
    print "I: computing EXE_dependencies"
    # fall back to the running interpreter / platform when not overridden
    python = opts.executable or sys.executable
    target_platform = opts.target_platform or sys.platform
    config['python'] = python