def test_main(self):
    """Test build.main() with an empty argument vector.

    Stubs sys.argv so the build script sees no options, then restores
    the original argv.
    """
    old_args = sys.argv
    sys.argv = ['']
    try:
        build.main()
    finally:
        # Restore argv even if build.main() raises, so later tests are
        # not polluted by the stubbed argument vector (the original
        # skipped restoration on exception).
        sys.argv = old_args
def main():
    """Compile and run one JWM example app against skija.

    Command-line options select the example, the JWM/skija versions
    (fetched from maven) or a local skija checkout to link against.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--example', default='dashboard')
    parser.add_argument('--jwm-version', default=None)
    parser.add_argument('--skija-version', default='0.98.0')
    parser.add_argument('--skija-dir', default=None)
    parser.add_argument('--types-dir', default=None)
    args = parser.parse_args()
    # Without an explicit JWM version, build the local tree first.
    if not args.jwm_version:
        build.main()
    if args.skija_dir:
        # Resolve before chdir below so a relative path stays valid.
        skija_dir = os.path.abspath(args.skija_dir)
    os.chdir(common.basedir)

    # compile
    classpath = common.deps_compile()
    if args.jwm_version:
        # Use the published JWM artifact.
        classpath += [build_utils.fetch_maven('io.github.humbleui.jwm', 'jwm', args.jwm_version)]
    else:
        # Use the locally built classes and native library.
        classpath += ['target/classes', build_utils.system + '/build']
    if args.skija_dir:
        # Link against a local skija checkout.
        classpath += [
            skija_dir + '/platform/build',
            skija_dir + '/platform/target/classes',
            skija_dir + '/shared/target/classes',
        ]
    else:
        # macOS artifacts are additionally split per architecture.
        skija_native = 'skija-' + build_utils.system + (('-' + build_utils.arch) if 'macos' == build_utils.system else '')
        classpath += [
            build_utils.fetch_maven('io.github.humbleui', 'skija-shared', args.skija_version),
            build_utils.fetch_maven('io.github.humbleui', skija_native, args.skija_version),
        ]
    sources = build_utils.files(f'examples/{args.example}/java/**/*.java')
    build_utils.javac(sources, f'examples/{args.example}/target/classes', classpath = classpath, release='16')

    # run
    subprocess.check_call([
        'java',
        '--class-path', build_utils.classpath_join(classpath + [f'examples/{args.example}/target/classes']),
        '-Djava.awt.headless=true',
        '-enableassertions',
        '-enablesystemassertions',
        '-Dfile.encoding=UTF-8',
        '-Xcheck:jni',
        'io.github.humbleui.jwm.examples.Example'
    ])
    return 0
def test_main(self):
    """Execute build.main() with a full set of test flags.

    Stubs sys.argv with the flags and restores the original afterwards.
    """
    import build
    old_args = sys.argv
    sys.argv = ['buildrun', '--test-env', '-b', '-c', '-d', '-e', '-f']
    try:
        build.main()
    finally:
        # Restore argv even if build.main() raises (the original left
        # the stubbed argv in place on exception).
        sys.argv = old_args
def test_printhelp(self):
    """Run build.main() with no options so it prints its help text."""
    import build
    old_args = sys.argv
    sys.argv = ['buildrun']
    try:
        build.main()
    finally:
        # Restore argv even if build.main() raises.
        sys.argv = old_args
def continuous_build(should_run):
    """Invoke build.main() in a loop, once per second.

    :param should_run: mutable mapping with a boolean under key ``'now'``;
        the loop stops when ``should_run['now']`` becomes falsy.
        (Previous docstring said ``Event``, but the code clearly indexes
        it like a dict via ``should_run['now']``.)
    """
    import build
    while should_run['now']:
        build.main()
        sleep(1)
def main():
    """Interactively create a git branch, push it, and rebuild.

    Prompts for a branch name, runs ``git checkout -b`` and
    ``git push origin HEAD``, then delegates to build.main().
    Python 2 only (uses raw_input and print statements).
    """
    # Get the branch name
    name = raw_input('Enter in branch name: ')
    print util.log('Checking to new branch')
    # Attempt to checkout branch
    print subprocess.check_output(['git', 'checkout', '-b', name])
    # Once done, push to new branch
    print util.log('Pushing to remote branch')
    # If it's updated, let's go through with the next process
    print subprocess.check_output(['git', 'push', 'origin', 'HEAD'])
    print util.log('Update build')
    build.main()
def main():
    """Build, package, and (unless --dry-run) deploy the skija jar.

    Packages target/classes plus version-substituted maven metadata into
    ``skija-<classifier>-<rev>.jar``, then deploys it with maven.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--dry-run", action="store_true")
    (args, _) = parser.parse_known_args()

    # Build
    build.main()

    # Update poms
    rev = revision.revision()
    os.chdir(common.root + '/platform')
    artifact = "skija-" + common.classifier
    # Substitute ${version} in the maven metadata while packaging.
    with common.replaced(
            'deploy/META-INF/maven/org.jetbrains.skija/' + artifact + '/pom.xml',
            {'${version}': rev}):
        with common.replaced(
                'deploy/META-INF/maven/org.jetbrains.skija/' + artifact + '/pom.properties',
                {'${version}': rev}):
            # Record the build revision where the library can read it at runtime.
            with open(
                    'target/classes/org/jetbrains/skija/' + common.classifier.replace('-', '/') + '/skija.version',
                    'w') as f:
                f.write(rev)
            print("Packaging " + artifact + "-" + rev + ".jar")
            subprocess.check_call([
                "jar",
                "--create",
                "--file", "target/" + artifact + "-" + rev + ".jar",
                "-C", "target/classes", ".",
                "-C", "deploy", "META-INF/maven/org.jetbrains.skija/" + artifact
            ])
            if not args.dry_run:
                print("Deploying", artifact + "-" + rev + ".jar")
                # NOTE(review): the credential arguments below are garbled
                # (look like redacted secrets, e.g. os.getenv calls that were
                # masked to ``"******"``); as written this is not valid
                # Python -- restore the original expressions before use.
                subprocess.check_call([
                    common.mvn,
                    "--batch-mode",
                    "--settings", "deploy/settings.xml",
                    "-Dspace.username="******"USER_NAME"),
                    "-Dspace.password="******"SPACE_TOKEN"),
                    "deploy:deploy-file",
                    "-Dfile=target/" + artifact + "-" + rev + ".jar",
                    "-DpomFile=deploy/META-INF/maven/org.jetbrains.skija/" + artifact + "/pom.xml",
                    "-DrepositoryId=space-maven",
                    "-Durl=" + common.space_skija,
                ])
    return 0
def build_from_tarball(my_build, target):
    """Extract a prebuilt external-app tarball into NUWRFDIR and build it.

    Args:
        my_build: build configuration object forwarded to build.main().
        target: base name of the tarball (``<target>.tgz``).

    The tarball directory is only known on NASA 'discover'/'borg' hosts;
    elsewhere apps_dir stays empty and the lookup fails with an error log.
    """
    apps_dir = ""
    nuwrf_dir = os.environ.get("NUWRFDIR")
    if "discover" in platform.node() or "borg" in platform.node():
        apps_dir = "/discover/nobackup/projects/nu-wrf/external_apps/"
    tarball = apps_dir + target + ".tgz"
    if os.path.isfile(tarball):
        os.chdir(nuwrf_dir)
        # Context manager closes the tarfile even if extraction fails
        # (the original never closed the handle).
        with tarfile.open(tarball) as tf:
            tf.extractall()
        build.main(my_build, target)
    else:
        logger.error(tarball + " not found!\n"
                     "To build, place tarball named " + target +
                     ".tgz in {} and re-run command".format(nuwrf_dir))
def main(): ruby_version = subprocess.check_output(['cat', '.ruby-version' ]).replace('\n', '') # Sets up ruby print util.log('Installing ruby ' + ruby_version) print subprocess.check_output(['rvm', 'install', ruby_version]) # Sets up rvm print util.log('Use Ruby ' + ruby_version) subprocess.call(['./bin/use-rvm.sh']) # Install bundle print util.log('Install Bundle') print subprocess.check_output(['gem', 'install', 'bundler']) # Install gems build.main()
def main():
    """Show the menu and dispatch the chosen entry to its handler."""
    choice = int(show_menu())
    # Table-driven dispatch in place of an if/elif ladder.
    handlers = {
        1: devmenu.main,
        2: merge.main,
        3: build.main,
        4: setup.main,
        5: quit,
    }
    handler = handlers.get(choice)
    if handler is not None:
        handler()
def run_tests(args):
    """Prepare the library and run the karma tests once or multiple times.

    Updates node modules, regenerates deps, builds the library (unless
    ``--no-build`` is passed), then dispatches to the single- or
    multi-run test driver. Returns 1 on any preparation failure.
    """
    # Update node modules if needed.
    if not shakaBuildHelpers.update_node_modules():
        return 1

    # Generate dependencies and compile library.
    # This is required for the tests.
    if gendeps.gen_deps([]) != 0:
        return 1

    build_args = []
    if '--force' in args:
        # Forward --force to the build, but hide it from the test driver.
        build_args.append('--force')
        args.remove('--force')

    if '--no-build' in args:
        args.remove('--no-build')
    else:
        if build.main(build_args) != 0:
            return 1

    if '--runs' in args:
        return run_tests_multiple(args)
    else:
        return run_tests_single(args)
def main() -> Tuple[str, str, str]:
    """Build JWM and package the binary, sources, and javadoc jars.

    Returns:
        Tuple of paths: (binary jar, sources jar, javadoc jar).
    """
    build.main()
    os.chdir(common.basedir)
    # Stamp the maven metadata with the current version.
    build_utils.copy_replace(
        "deploy/META-INF/maven/io.github.humbleui/jwm/pom.xml",
        "target/maven/META-INF/maven/io.github.humbleui/jwm/pom.xml",
        {"${version}": common.version})
    build_utils.copy_replace(
        "deploy/META-INF/maven/io.github.humbleui/jwm/pom.properties",
        "target/maven/META-INF/maven/io.github.humbleui/jwm/pom.properties",
        {"${version}": common.version})
    jar = build_utils.jar(f"target/jwm-{common.version}.jar",
                          ("target/classes", "."),
                          ("target/maven", "META-INF"))

    # Merge all per-platform java sources into one tree for the sources jar.
    build_utils.makedirs("target/src/io/github/humbleui/jwm")
    shutil.copytree("linux/java", "target/src/io/github/humbleui/jwm", dirs_exist_ok=True)
    shutil.copytree("macos/java", "target/src/io/github/humbleui/jwm", dirs_exist_ok=True)
    shutil.copytree("shared/java", "target/src/io/github/humbleui/jwm", dirs_exist_ok=True)
    shutil.copytree("windows/java", "target/src/io/github/humbleui/jwm", dirs_exist_ok=True)
    sources = build_utils.jar(f"target/jwm-{common.version}-sources.jar",
                              ("target/src", "."),
                              ("target/maven", "META-INF"))

    # Delombok before javadoc so generated members are documented.
    build_utils.delombok(["target/src"], "target/delomboked", classpath=common.deps_compile())
    build_utils.javadoc(["target/delomboked"], "target/apidocs", classpath=common.deps_compile())
    javadoc = build_utils.jar(f"target/jwm-{common.version}-javadoc.jar",
                              ("target/apidocs", "."),
                              ("target/maven", "META-INF"))
    return (jar, sources, javadoc)
def main():
    """Compile and run the skija test suite.

    With ``--skija-version``, tests run against published maven
    artifacts; otherwise the local tree is built first and used.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--skija-version')
    (args, _) = parser.parse_known_args()
    modulepath = []
    if args.skija_version:
        modulepath += [
            common.fetch_maven('org.jetbrains.skija', 'skija-shared', args.skija_version, repo=common.space_skija),
            common.fetch_maven('org.jetbrains.skija', 'skija-' + common.classifier, args.skija_version, repo=common.space_skija)
        ]
    else:
        # No version pinned: build locally and test the fresh classes.
        build.main()
        modulepath += ['../shared/target/classes', '../platform/target/classes']
    os.chdir(common.root + '/tests')
    sources = common.glob('java', '*.java')
    common.javac(sources, 'target/classes', modulepath=modulepath, add_modules=[common.module])
    common.check_call([
        'java',
        # '--class-path', common.classpath_separator.join(modulepath + ['target/classes']),
        '--class-path', 'target/classes',
        '--module-path', common.classpath_separator.join(modulepath),
        '--add-modules', common.module
        # macOS requires AWT/JNI work to start on the first thread.
    ] + (['-XstartOnFirstThread'] if 'macos' == common.system else []) + [
        '-Djava.awt.headless=true',
        '-enableassertions',
        '-enablesystemassertions',
        '-Xcheck:jni',
        '-Dskija.logLevel=DEBUG',
        'org.jetbrains.skija.test.TestSuite'
    ])
    return 0
def main():
    """Clean, then build. Couldn't be simpler!

    Returns clean.main()'s error code when non-zero, otherwise the
    result of build.main().
    """
    status = clean.main()
    if status != 0:
        return status
    return build.main()
def main(args):
    """Build the Shaka Player library in debug and/or release modes.

    Runs gendeps and check first; returns their exit code on failure,
    otherwise the result of the last build attempted.
    """
    parser = argparse.ArgumentParser(
        description='User facing build script for building the Shaka'
        ' Player Project.')
    parser.add_argument(
        '--force',
        '-f',
        help='Force building the library even if no files have changed.',
        action='store_true')
    parser.add_argument(
        '--debug',
        help='Limit which build types to build. Will at least build the debug '
        'version.',
        action='store_true')
    parser.add_argument(
        '--release',
        help='Limit which build types to build. Will at least build the '
        'release version.',
        action='store_true')
    parsed_args = parser.parse_args(args)

    code = gendeps.gen_deps([])
    if code != 0:
        return code

    code = check.main([])
    if code != 0:
        return code

    build_args = ['--name', 'compiled', '+@complete']
    if parsed_args.force:
        build_args += ['--force']

    # Create the list of build modes to build with. If the list is empty
    # by the end, then populate it with every mode.
    modes = []
    modes += ['debug'] if parsed_args.debug else []
    modes += ['release'] if parsed_args.release else []

    # If --debug or --release are not given, build with everything.
    if not modes:
        modes += ['debug', 'release']

    result = 0
    for mode in modes:
        result = build.main(build_args + ['--mode', mode])
        # If a build fails then there is no reason to build the other modes.
        if result:
            break

    return result
def main(args):
    """Build the Shaka Player library in debug and/or release modes.

    Variant that passes the mode as a bare ``--debug``/``--release``
    flag to build.main() rather than a ``--mode`` option.
    """
    parser = argparse.ArgumentParser(
        description='User facing build script for building the Shaka'
        ' Player Project.')
    parser.add_argument(
        '--force',
        '-f',
        help='Force building the library even if no files have changed.',
        action='store_true')
    parser.add_argument(
        '--debug',
        help='Limit which build types to build. Will at least build the debug '
        'version.',
        action='store_true')
    parser.add_argument(
        '--release',
        help='Limit which build types to build. Will at least build the '
        'release version.',
        action='store_true')
    parsed_args = parser.parse_args(args)

    code = gendeps.gen_deps([])
    if code != 0:
        return code

    code = check.main([])
    if code != 0:
        return code

    build_args = ['--name', 'compiled', '+@complete']
    if parsed_args.force:
        build_args += ['--force']

    # Create the list of build modes to build with. If the list is empty
    # by the end, then populate it with every mode.
    modes = []
    modes += ['--debug'] if parsed_args.debug else []
    modes += ['--release'] if parsed_args.release else []

    # If --debug or --release are not given, build with everything.
    if not modes:
        modes += ['--debug', '--release']

    result = 0
    for mode in modes:
        result = build.main(build_args + [mode])
        # If a build fails then there is no reason to build the other modes.
        if result:
            break

    return result
def main(cmd_line_args):
    """Description of t main setup process

    - get all absolute paths of foxBMS repositories
    - clone all repositories or general and specified ones
    - build documentation (sphinx, and Doxygen for both microcontrollers)
      if not otherwise specified

    Args:
        cmd_line_args (Namespace): Arguments passed by the command line
    """
    # Map -v/-vv style verbosity onto logging levels.
    if cmd_line_args.verbosity == 1:
        logging.basicConfig(level=logging.INFO)
    elif cmd_line_args.verbosity > 1:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.WARNING)
    # Optional branch overrides (note: attribute name is spelled
    # "specfiy_..." in the argument parser).
    if cmd_line_args.specfiy_software_branch:
        global SW_VERSION
        SW_VERSION = cmd_line_args.specfiy_software_branch
    if cmd_line_args.specfiy_hardware_branch:
        global HW_VERSION
        HW_VERSION = cmd_line_args.specfiy_hardware_branch
    repository_basepath, setup_repo_name = get_main_git_path()
    logging.info(PRINT_MARK)
    logging.info('Setting up the foxBMS project in directory')
    logging.info(os.path.dirname(os.path.realpath(__file__)))
    logging.info(PRINT_MARK)
    # setup general software dependency repositories
    repo_list = read_yaml()
    for repos in repo_list:
        yaml_repos_abspath = set_git_paths(repository_basepath, repos[0])
        setup_repo_class(repos[0], yaml_repos_abspath, repos[1], repos[2])
    if cmd_line_args.specfiy_repos:
        info = 'specified repositories'
        # NOTE(review): ``specified_repos``/``specified_repos_abspath`` are
        # not defined anywhere in this function -- confirm where they come
        # from; as written this branch would raise NameError.
        setup_repo_class(specified_repos, specified_repos_abspath, info)
    if not cmd_line_args.dont_build_documentation:
        builders = ['--primary', '--secondary', '--doxygen', '--sphinx']
        build.main(builders)
def main(_):
    """Regenerate dependencies, run checks, then compile the library.

    Returns the first failing step's exit code, or the build result.
    """
    status = gendeps.genDeps([])
    if status:
        return status
    status = check.main([])
    if status:
        return status
    return build.main(['--name', 'compiled', '+@complete'])
def RunCommand(self, karma_conf):
    """Build a command and send it to Karma for execution.

    Uses |self.parsed_args| and |self.karma_config| to build and run a
    Karma command.
    """
    if self.parsed_args.use_xvfb and not shakaBuildHelpers.is_linux():
        logging.error('xvfb can only be used on Linux')
        return 1

    if not shakaBuildHelpers.update_node_modules():
        logging.error('Failed to update node modules')
        return 1

    karma = shakaBuildHelpers.get_node_binary('karma')
    # Wrap in xvfb-run when a virtual framebuffer was requested.
    cmd = ['xvfb-run', '--auto-servernum'] if self.parsed_args.use_xvfb else []
    cmd += karma + ['start']
    cmd += [karma_conf] if karma_conf else []
    cmd += ['--settings', json.dumps(self.karma_config)]

    # There is no need to print a status here as the gendep and build
    # calls will print their own status updates.
    if self.parsed_args.build:
        if gendeps.main([]) != 0:
            logging.error('Failed to generate project dependencies')
            return 1

        if build.main(['--force'] if self.parsed_args.force else []) != 0:
            logging.error('Failed to build project')
            return 1

    # Before Running the command, print the command.
    if self.parsed_args.print_command:
        logging.info('Karma Run Command')
        logging.info('%s', cmd)

    # Run the command.
    results = []
    for run in range(self.parsed_args.runs):
        logging.info('Running test (%d / %d, %d failed so far)...',
                     run + 1, self.parsed_args.runs,
                     len(results) - results.count(0))
        results.append(shakaBuildHelpers.execute_get_code(cmd))

    # Print a summary of the results.
    if self.parsed_args.runs > 1:
        logging.info('All runs completed. %d / %d runs passed.',
                     results.count(0), len(results))
        logging.info('Results (exit code): %r', results)
    else:
        logging.info('Run complete')
        logging.info('Result (exit code): %d', results[0])

    # Overall exit code: 0 only if every run exited 0.
    return 0 if all(result == 0 for result in results) else 1
def main(args):
    """Dispatch the first CLI argument to the matching sub-command.

    Unknown (or missing) commands fall through to the help handler.
    """
    command = args[1] if len(args) > 1 else ""
    dispatch = {
        "create": create.main,
        "build": build.main,
    }
    return dispatch.get(command, help.main)(args)
def main(): import sys if len(sys.argv) != 2: print help sys.exit(2) raise path = sys.argv[1] from build import main main() import os pwd = os.path.abspath(os.curdir) export = os.path.join(pwd, 'EXPORT') from utils.install import copy_all, build_envs_sh copy_all(export, path) build_envs_sh(path) return
def main():
    """Install Homebrew (or update it), Vapor, service deps, then build.

    Python 2 only (print statements; check_output returns str).
    """
    # Install homebrew
    print util.log('Installing Homebrew')
    brew = subprocess.check_output(['which', 'brew']).replace('\n', '')
    if brew == '/usr/local/bin/brew':
        # Homebrew already present: just refresh it.
        print subprocess.check_output(['brew', 'update'])
    else:
        print subprocess.check_output([
            '/usr/bin/ruby',
            '-e',
            '"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"'
        ])
    # Set up vapor
    print util.log('Installing Vapor')
    install_package('vapor')
    # Setup other dependicies
    print util.log('Installing dependencies')
    install_package('postgresql')
    install_package('redis')
    install_package('libpq')
    # attempt to build
    build.main()
def RunCommand(self, karma_conf):
    """Build a command and send it to Karma for execution.

    Uses |self.parsed_args| and |self.karma_config| to build and run a
    Karma command.
    """
    if self.parsed_args.use_xvfb and not shakaBuildHelpers.is_linux():
        logging.error('xvfb can only be used on Linux')
        return 1

    if not shakaBuildHelpers.update_node_modules():
        logging.error('Failed to update node modules')
        return 1

    karma = shakaBuildHelpers.get_node_binary('karma')
    # Wrap in xvfb-run when a virtual framebuffer was requested.
    cmd = ['xvfb-run', '--auto-servernum'] if self.parsed_args.use_xvfb else []
    cmd += karma + ['start']
    cmd += [karma_conf] if karma_conf else []
    cmd += ['--settings', json.dumps(self.karma_config)]

    # There is no need to print a status here as the gendep and build
    # calls will print their own status updates.
    if self.parsed_args.build:
        if gendeps.gen_deps([]) != 0:
            logging.error('Failed to generate project dependencies')
            return 1

        if build.main(['--force'] if self.parsed_args.force else []) != 0:
            logging.error('Failed to build project')
            return 1

    # Before Running the command, print the command.
    if self.parsed_args.print_command:
        logging.info('Karma Run Command')
        logging.info('%s', cmd)

    # Run the command.
    results = []
    for run in range(self.parsed_args.runs):
        logging.info('Running test (%d / %d)...', run + 1, self.parsed_args.runs)
        results.append(shakaBuildHelpers.execute_get_code(cmd))

    # Print a summary of the results.
    if self.parsed_args.runs > 1:
        logging.info('All runs completed. %d / %d runs passed.',
                     results.count(0), len(results))
        logging.info('Results (exit code): %r', results)
    else:
        logging.info('Run complete')
        logging.info('Result (exit code): %d', results[0])

    # Overall exit code: 0 only if every run exited 0.
    return 0 if all(result == 0 for result in results) else 1
def main(args):
    """Generate deps and run checks, then build debug and/or release.

    With neither --debug nor --release, both variants are built.
    """
    status = gendeps.gen_deps([])
    if status:
        return status
    status = check.main([])
    if status:
        return status

    build_args = ['--name', 'compiled', '+@complete']
    if '--force' in args:
        build_args.append('--force')

    # Each flag limits the set; absence of both means "build everything".
    want_debug = '--debug' in args or '--release' not in args
    want_release = '--release' in args or '--debug' not in args

    if want_debug and build.main(build_args + ['--debug']) != 0:
        return 1
    if want_release and build.main(build_args) != 0:
        return 1
    return 0
def runTests(args): """Runs all the karma tests.""" # Update node modules if needed. if not shakaBuildHelpers.updateNodeModules(): return 1 # Generate dependencies and compile library. # This is required for the tests. if gendeps.genDeps([]) != 0: return 1 build_args = [] if '--force' in args: build_args.append('--force') args.remove('--force') if '--no-build' in args: args.remove('--no-build') else: if build.main(build_args) != 0: return 1 karma_command_name = 'karma' if shakaBuildHelpers.isWindows(): # Windows karma program has a different name karma_command_name = 'karma.cmd' karma_path = shakaBuildHelpers.getNodeBinaryPath(karma_command_name) cmd = [karma_path, 'start'] # Get the browsers supported on the local system. browsers = _GetBrowsers() if not browsers: print >> sys.stderr, 'Unrecognized system "%s"' % platform.uname()[0] return 1 print 'Starting tests...' if len(args) == 0: # Run tests in all available browsers. print 'Running with platform default:', '--browsers', browsers cmdLine = cmd + ['--browsers', browsers] shakaBuildHelpers.printCmdLine(cmdLine) return subprocess.call(cmdLine) else: # Run with command-line arguments from the user. if '--browsers' not in args: print 'No --browsers specified.' print 'In this mode, browsers must be manually connected to karma.' cmdLine = cmd + args shakaBuildHelpers.printCmdLine(cmdLine) return subprocess.call(cmdLine)
def run_tests(args): """Runs all the karma tests.""" # Update node modules if needed. if not shakaBuildHelpers.update_node_modules(): return 1 # Generate dependencies and compile library. # This is required for the tests. if gendeps.gen_deps([]) != 0: return 1 build_args = [] if "--force" in args: build_args.append("--force") args.remove("--force") if "--no-build" in args: args.remove("--no-build") else: if build.main(build_args) != 0: return 1 karma_command_name = "karma" if shakaBuildHelpers.is_windows(): # Windows karma program has a different name karma_command_name = "karma.cmd" karma_path = shakaBuildHelpers.get_node_binary_path(karma_command_name) cmd = [karma_path, "start"] # Get the browsers supported on the local system. browsers = _get_browsers() if not browsers: print >> sys.stderr, 'Unrecognized system "%s"' % platform.uname()[0] return 1 print "Starting tests..." if not args: # Run tests in all available browsers. print "Running with platform default:", "--browsers", browsers cmd_line = cmd + ["--browsers", browsers] shakaBuildHelpers.print_cmd_line(cmd_line) return subprocess.call(cmd_line) else: # Run with command-line arguments from the user. if "--browsers" not in args: print "No --browsers specified." print "In this mode, browsers must be manually connected to karma." cmd_line = cmd + args shakaBuildHelpers.print_cmd_line(cmd_line) return subprocess.call(cmd_line)
def run_tests_single(args): """Runs all the karma tests.""" # Update node modules if needed. if not shakaBuildHelpers.update_node_modules(): return 1 # Generate dependencies and compile library. # This is required for the tests. if gendeps.gen_deps([]) != 0: return 1 build_args = [] if '--force' in args: build_args.append('--force') args.remove('--force') if '--no-build' in args: args.remove('--no-build') else: if build.main(build_args) != 0: return 1 karma_path = shakaBuildHelpers.get_node_binary_path('karma') cmd = [karma_path, 'start'] if shakaBuildHelpers.is_linux() and '--use-xvfb' in args: cmd = ['xvfb-run', '--auto-servernum'] + cmd # Get the browsers supported on the local system. browsers = _get_browsers() if not browsers: print >> sys.stderr, 'Unrecognized system "%s"' % platform.uname()[0] return 1 print 'Starting tests...' if not args: # Run tests in all available browsers. print 'Running with platform default:', '--browsers', browsers cmd_line = cmd + ['--browsers', browsers] shakaBuildHelpers.print_cmd_line(cmd_line) return subprocess.call(cmd_line) else: # Run with command-line arguments from the user. if '--browsers' not in args: print 'No --browsers specified.' print 'In this mode, browsers must be manually connected to karma.' cmd_line = cmd + args shakaBuildHelpers.print_cmd_line(cmd_line) return subprocess.call(cmd_line)
def main(args):
    """Generate dependencies, run checks, then compile the library.

    Returns the first failing step's exit code, or the build result.
    """
    for step in (gendeps.gen_deps, check.main):
        result = step([])
        if result != 0:
            return result

    compile_args = ['--name', 'compiled', '+@complete']
    if '--force' in args:
        compile_args.append('--force')
    return build.main(compile_args)
def start_server():
    """Run an initial build, then rebuild whenever pages/ or static/ change.

    Blocks until KeyboardInterrupt, then shuts both watchers down.
    """
    print('starting development server...')
    print('starting first compile')
    build.main([], 'First build -> ')

    class build_handler(FileSystemEventHandler):
        def on_any_event(self, event):
            # Any filesystem event triggers a rebuild, prefixed with a date.
            build.main([], helper.nice_date())

    observer = Observer()
    observer.schedule(build_handler(), path='pages', recursive=True)
    observer.start()
    observer2 = Observer()
    observer2.schedule(build_handler(), path='static', recursive=True)
    observer2.start()
    try:
        while True:
            time.sleep(0.1)
    except KeyboardInterrupt:
        print('Gracefully exiting')
        # Bug fix: the second observer was never stopped or joined,
        # leaving its watcher thread running after shutdown.
        observer.stop()
        observer2.stop()
        observer.join()
        observer2.join()
def aho_corasick(blob, outfolder):
    """Scan blob with an Aho-Corasick automaton of image magic numbers.

    Each match whose type reports a positive length is carved out of
    blob and written to outfolder as ``image<count><ext>``.

    Returns:
        The number of images extracted.
    """
    count = 0
    state = build.main(images.types)
    for n in range(len(blob)):
        # Follow failure links until a goto transition exists.
        while state.g(blob[n]) is None:
            state = state.f
        state = state.g(blob[n])
        if state.output:
            if images.image.Image.verbose:
                print("Found magic for " + str(state.output) + " at " + str(n) + ", ")
            for t in state.output:
                # We're at the last character in the needle,
                # figure out the offset of the start of the blob
                offset = n - len(t.magic) + 1
                length = t.calculate_length(blob, offset)
                if length > 0:
                    name = outfolder + '/image' + str(count) + t.get_extension()
                    # Context manager guarantees the carved file is
                    # closed even if the write fails (the original used
                    # bare open/close); also renamed the ambiguous `l`.
                    with open(name, 'wb') as f:
                        f.write(blob[offset:offset + length])
                    print("Saved %d-byte image to %s" % (length, name))
                    count += 1
    return count
# Realm667 - An Awesome Awesomeness # Copyright (C) 2015, 2016 Alexey Lysiuk # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import os import sys if (sys.hexversion < 0x2070000 or 0x3000000 <= sys.hexversion < 0x3020000): print('This script requires Python 2.7 or Python 3.2 and higher') exit(1) # all modules are in the lib directory sys.path[0] = os.path.dirname(os.path.abspath(__file__)) + '/lib' import build build.main()
#!/usr/bin/python3 import build build.main(__name__, __file__) class Config(build.Config): languages = ['python']
def main(argv):
    """Sandbox test driver: set up/select an appliance, pick test modes
    and attributes, optionally auto-build, then run tests via nose.

    Returns 0 on success, a non-zero count/error code otherwise.
    Appliance-related options (--setup, --sb-setup, --ea/--psa, --ip,
    --user, --passwd, --branding, --release, --repo, --recreate-psa)
    configure or create a PSA VM to run the tests against; see the
    team wiki page on running tests with an appliance for details.
    """
    try:
        if '-h' in argv or '--help' in argv:
            return run_nose(argv)
        sb = Sandbox(SBROOT)
        # --setup / --sb-setup: interactive appliance setup, then exit.
        if '--setup' in argv:
            argv.remove('--setup')
            isetup.defaultSetup()
            print '\n'
            sys.exit('setup is complete')
        if '--sb-setup' in argv:
            argv.remove('--sb-setup')
            isetup.sbSetup()
            print '\n'
            sys.exit('setup is complete')
        global addingArgs
        if '--no-adding-args' in argv:
            addingArgs = False
            argv.remove('--no-adding-args')
        # Setup may have recorded extra args to append to every run.
        if addingArgs:
            argsToAdd = getAdditionalArgs()
            if argsToAdd:
                added = ''
                for arg in argsToAdd.split():
                    if arg not in argv:
                        added += ' ' + arg
                        argv.append(arg)
                if added:
                    print 'added', added
        global_vars.ea = None
        ip = None
        user = None
        passwd = None

        def retreiveAndDelArg(arg):
            # Pop `arg` and its value from argv, returning the value.
            # NOTE(review): defined but not used below -- the loops that
            # follow duplicate this logic inline.
            for i, tryArg in enumerate(argv):
                if tryArg == arg:
                    ret = argv[i + 1]
                    del argv[i]
                    del argv[i]
                    return ret

        branding = None
        release = None
        repo = None
        # Strip appliance-credential options out of argv before they
        # reach nose. (Each loop mutates argv while iterating; it works
        # because it effectively stops after the first match.)
        for i, arg in enumerate(argv):
            if arg == '--ea' or arg == '--psa':
                global_vars.ea = argv[i + 1]
                del argv[i]
                del argv[i]
        for i, arg in enumerate(argv):
            if arg == '--ip' or arg == '--host':
                ip = argv[i + 1]
                del argv[i]
                del argv[i]
        for i, arg in enumerate(argv):
            if arg == '--user':
                user = argv[i + 1]
                del argv[i]
                del argv[i]
        for i, arg in enumerate(argv):
            if arg == '--passwd' or arg == '--password':
                passwd = argv[i + 1]
                del argv[i]
                del argv[i]
        for i, arg in enumerate(argv):
            if arg == '--release':
                release = argv[i + 1]
                del argv[i]
                del argv[i]
        for i, arg in enumerate(argv):
            if arg == '--branding':
                branding = argv[i + 1]
                del argv[i]
                del argv[i]
        for i, arg in enumerate(argv):
            if arg == '--repo':
                repo = argv[i + 1]
                del argv[i]
                del argv[i]
        # Full credentials given on the command line: record them.
        if global_vars.ea and ip and user and passwd:
            setup.setSetupFile()
            setup.configAppliance(global_vars.ea, ip, user, passwd)
        # make sure we're creating a new PSA if an appliance sandbox type is being used
        if 'appliance' in sb.get_sandboxtype().get_variant():
            global_vars.ea = 'sb-test-appliance'
            if '--recreate-psa' not in argv and global_vars.ea in isetup.getAppliances():
                argv.append('--recreate-psa')
        isetup.setSetupFile()
        # create the appliance if the appliance doesn't exist in the setup file
        if global_vars.ea and global_vars.ea not in isetup.getAppliances():
            ansi.printc('Appliance %s does not exist, will attempt to create it in 5 seconds, push ctrl+c to cancel' % global_vars.ea, colors.WARNING_COLOR)
            time.sleep(5)
            ip = controlVM.createPSA(global_vars.ea, branding, release, repo)
            isetup.setAppliance(global_vars.ea, ip, 'root', 'psa')
        if '--recreate-psa' in argv:
            assert global_vars.ea, 'A VM must be specified with the --ea option to use the --recreate-psa option'
            controlVM.createPSA(global_vars.ea, branding, release, repo)
            argv.remove('--recreate-psa')
        # Support "modes" of tests. This allows us to run a set of tests in
        # more than one way -- for example, with valgrind enabled or disabled,
        # or with SearchServer using hs5 versus hs3.
        mode_path = os.path.join(sb.get_root(), _MODE_FILE)
        argv, modes = get_modes(argv)
        if os.path.isfile(mode_path):
            os.remove(mode_path)
        if modes:
            if modes[0].lower() != 'none':
                with open(mode_path, 'w') as f:
                    f.write(','.join(modes))
        attr_arg = find_attr_arg_index(argv)
        if len(modes) == 0 and attr_arg is None:
            # Neither modes nor attributes given: run every configured
            # mode+attr combo, recursing into main() once per combo.
            modeattrs = sb.get_sandboxtype().get_test_mode_attrs_combo()
            result = 0
            runcompiled = True
            runcodescan = True
            for modeattr in modeattrs.split(';'):
                runnable_assembly._modes = None
                if modeattr.find(':') == -1:
                    if len(modeattr) > 3:
                        print('Skipping mode+attr combo "%s" because no separator ":" was found' % modeattr)
                    continue
                print('Running mode+attr combo: %s' % modeattr)
                mode, attrs = modeattr.split(':')
                argnew = argv[:]  # copy argv into argnew so we can append things for this round of tests
                argnew.insert(1, '-A')
                argnew.insert(2, attrs.strip())
                argnew.insert(3, '--mode')
                argnew.insert(4, mode.strip())
                # Compiled tests and codescan only need to run once, on
                # the first combo.
                if not runcompiled:
                    argnew.append('--skip-compiled')
                if not runcodescan:
                    argnew.append('--skip-codescan')
                result = result + main(argnew);
                runcompiled = False
                runcodescan = False
            print("")
            if result == 0:
                print("All test combos passed.")
            else:
                print("%s test combos failed!" % result)
            return result
        elif attr_arg is None:
            # No attribute filter given: use the sandbox default.
            argv.insert(1, '-A')
            attrs = sb.get_sandboxtype().get_test_attrs()
            argv.insert(2, attrs)
            print('Running tests that are: %s.' % attrs)
        else:
            # If we're running interactive tests, make sure we're also
            # running with the -s switch.
            print('Running tests that are: %s.' % argv[attr_arg + 1])
            x = argv[attr_arg + 1].replace('not ', '!')
            if 'interactive' in x and '!interactive' not in x:
                if '-s' not in argv:
                    argv.insert(1, '-s')
        if len(modes) > 0:
            if modes[0].lower() == 'none':
                modes = []
            print('Running modes: %s' % ','.join(modes))
        # Make sure any path args in the arg list are fully normalized so they
        # are not sensitive to working directory changes.
        normalize_path_args(argv)
        # If we're collecting tests, it's virtually guaranteed that we want to
        # display the list...
        if '--collect-only' in argv:
            if '-v' not in argv and '--verbose' not in argv:
                verbosity_found = False
                for a in argv:
                    if a.startswith('--verbosity'):
                        verbosity_found = True
                        break
                if not verbosity_found:
                    argv.append('--verbose')
        # The "full" flag tells us to test all components in the sandbox,
        # instead of just the ones that are present in code form. If our scope
        # is full, no special effort is required. However, if our scope is
        # normal (quick), we have to tell nose which subdirs to use during
        # discovery.
        if not SPECIFIC_TESTS_SELECTED:
            global quick
            if '--full' in argv:
                argv.remove('--full')
                quick = False
            if '--quick' in argv:
                quick = True
                argv.remove('--quick')
            if quick:
                subset = []
                cr = sb.get_code_root()
                tr = sb.get_test_root()
                deps = [l.name for l in sb.get_cached_components()]
                for c in deps:
                    if component.CODE_ASPECT_NAME in sb.get_component_aspects(c) or c == sb.get_top_component():
                        component_test_root = sb.get_component_path(c, component.TEST_ASPECT_NAME)
                        if os.path.isdir(component_test_root):
                            if could_have_tests(component_test_root):
                                # Nose treats the first --where arg as a specifier
                                # of the working directory, and subsequent --where
                                # args as folders to use for discovery. So if we're
                                # going to add --where, we have to guarantee working
                                # dir is the first --where.
                                if not subset:
                                    argv.append('--where')
                                    argv.append(sb.get_root())
                                argv.append('--where')
                                argv.append(component_test_root)
                                subset.append(c)
                if subset:
                    print('Targeting: ' + ', '.join(subset) + ' (--full to override).')
        # The default behavior of the "sb test" command is supposed to be that
        # all tests get run, no matter what part of the sandbox you're in when
        # you invoke the command. The simplest way to achieve that with nose's
        # cwd-based discovery is to temporarily set the current working dir
        # to the test root whenever no explicit scope is provided.
        try:
            restore_dir = os.getcwd()
            os.chdir(sb.get_test_root())  # always run tests with the current directory being the test root
            if '--with-id' not in argv:
                argv.insert(1, '--with-id')
            if '--auto' in argv:
                argv.remove('--auto')
                auto_test(sb, argv)
            else:
                if '--build-first' in argv:
                    sb.set_auto_build(True)
                    argv.remove('--build-first')
                    err = build.main([])
                    if err:
                        return err
                elif '--no-build-first' in argv:
                    sb.set_auto_build(False)
                    argv.remove('--no-build-first')
                # Before running tests, automatically build sandbox if it's out of date.
                # This allows testers to get a built sandbox and immediately run tests. It
                # also allows developers to repeatedly code and test without explicitly
                # rebuilding in between.
                elif sb.needs_build() and sb.get_auto_build():
                    print('''
Built artifacts are out of date and will be refreshed before running tests.
To run tests without automatic build as needed, use "--no-build-first".
''')
                    err = build.main([])
                    if err:
                        return err
                return 0 if run_all(sb, argv) else 1
        finally:
            os.chdir(restore_dir)
    except:
        print(traceback.print_exc())
        return 1  # make sure bad things show up as an error
#!/usr/bin/env python from build import CHECK, build_and_run, main import sys header = 'Race and BI-access checks' def doit(): checks = [CHECK.RACE, CHECK.BI_ACCESS] return build_and_run(checks) def main_wrapper(argv): main(doit,header,argv) if __name__ == '__main__': sys.exit(main(doit,header))
def main_wrapper(argv):
    """Invoke the module-level `main` driver with `doit`, `header`, and argv.

    Returns `main`'s result so callers can propagate the exit status
    (the original discarded it and always returned None).
    """
    return main(doit, header, argv)
def run(self):
    """compile special java dependencies before the others."""
    # NOTE(review): `path` is not defined in this method — presumably a
    # module-level constant pointing at the java sources; confirm.
    java.main(path)
    # Then fall through to the standard setuptools `develop` behavior.
    develop.run(self)
def main(args):
    """User-facing build entry point for the Shaka Player project.

    Pipeline: generate localizations, run gendeps, style checks, docs,
    compile less for the UI and demo, then build the library (with and
    without the UI) for each requested mode, and finally the apps.

    Args:
        args: list of command-line argument strings, parsed by argparse.

    Returns:
        0 on success, 1 on the first failing step.
    """
    parser = argparse.ArgumentParser(
        description='User facing build script for building the Shaka'
                    ' Player Project.')
    parser.add_argument(
        '--locales',
        type=str,
        nargs='+',
        default=generateLocalizations.DEFAULT_LOCALES,
        help='The list of locales to compile in (default %(default)r)')
    parser.add_argument(
        '--fix',
        help='Automatically fix style violations.',
        action='store_true')
    parser.add_argument(
        '--force',
        '-f',
        help='Force building the library even if no files have changed.',
        action='store_true')
    parser.add_argument(
        '--debug',
        help='Limit which build types to build. Will at least build the debug '
             'version.',
        action='store_true')
    parser.add_argument(
        '--release',
        help='Limit which build types to build. Will at least build the '
             'release version.',
        action='store_true')
    parsed_args = parser.parse_args(args)

    # Make the dist/ folder; ignore "already exists" errors.
    base = shakaBuildHelpers.get_source_base()
    try:
        os.mkdir(os.path.join(base, 'dist'))
    except OSError:
        pass

    # Generate localizations before running gendeps, so the output is
    # available to the deps system.
    # TODO(#1858): It might be time to look at a third-party build system.
    localizations = compiler.GenerateLocalizations(parsed_args.locales)
    if not localizations.generate(parsed_args.force):
        return 1

    if gendeps.main([]) != 0:
        return 1

    check_args = []
    if parsed_args.fix:
        check_args += ['--fix']
    if parsed_args.force:
        check_args += ['--force']
    if check.main(check_args) != 0:
        return 1

    docs_args = []
    if parsed_args.force:
        docs_args += ['--force']
    if docs.main(docs_args) != 0:
        return 1

    # (Fixed: removed a stray trailing semicolon after this return.)
    if not compile_less('ui', 'controls', parsed_args):
        return 1
    if not compile_less('demo', 'demo', parsed_args):
        return 1

    build_args_with_ui = ['--name', 'ui', '+@complete']
    build_args_with_ui += ['--locales'] + parsed_args.locales
    build_args_without_ui = ['--name', 'compiled', '+@complete', '-@ui']

    if parsed_args.force:
        build_args_with_ui += ['--force']
        build_args_without_ui += ['--force']

    # Create the list of build modes to build with.  If neither --debug nor
    # --release was given, build every mode.
    modes = []
    if parsed_args.debug:
        modes.append('debug')
    if parsed_args.release:
        modes.append('release')
    if not modes:
        modes = ['debug', 'release']

    for mode in modes:
        # The complete build includes the UI library, but it is optional and
        # the player lib should build and work without it as well.  First
        # build the full build (UI included), then build excluding UI.
        for build_args in [build_args_with_ui, build_args_without_ui]:
            if build.main(build_args + ['--mode', mode]) != 0:
                return 1

        is_debug = mode == 'debug'
        if not apps.build_all(parsed_args.force, is_debug):
            return 1

    return 0
def on_any_event(self, event):
    # Filesystem-change callback (presumably a watchdog event handler hook —
    # confirm against the enclosing class).  `event` is ignored: every event
    # kind triggers the same rebuild, stamped with the current nice date.
    build.main([], helper.nice_date())
def evaluate(sb, options):
    """Run a full sandbox evaluation: update, build, test, publish, report.

    Each stage runs inside a Phase context that records its outcome into
    `state`; a stage only runs if every earlier stage succeeded
    (state.err is falsy).  After the phases, a best-effort notification
    email is sent if a notify.txt file exists, and a report is filed
    unless _should_report says otherwise.

    Returns:
        state.err — falsy on success, truthy on the first failure.
    """
    state = EvalState()
    phase_argv = ['--sandbox', sb.get_root()]
    # Hold the sandbox 'eval' lock and work from the sandbox root for the
    # entire evaluation.
    with ioutil.WorkingDir(sb.get_root()) as td:
        with sb.lock('eval') as lock:
            try:
                try:
                    if not options.no_update:
                        with Phase(state, EvalPhase.UPDATE, lock) as phase:
                            state.err = update(sb, lock)
                    if not state.err:
                        with Phase(state, EvalPhase.BUILD, lock) as phase:
                            argv = get_build_phase_args(phase_argv, options)
                            state.err = build.main(argv)
                    if not state.err:
                        with Phase(state, EvalPhase.TEST, lock) as phase:
                            argv = get_test_phase_args(["test"], options)
                            state.err = test.main(argv)
                    # Publish only when everything passed and this sandbox
                    # type is configured to publish.
                    if (not state.err) and sb.get_sandboxtype().get_should_publish():
                        with Phase(state, EvalPhase.PUBLISH, lock) as phase:
                            state.err = publish.main(phase_argv)
                except:
                    # NOTE(review): bare except is deliberate here — any
                    # failure must be captured and reported, not propagated.
                    txt = traceback.format_exc()
                    print(txt)
                    # It is possible for us to get an exception as we try to
                    # enter a phase (including the first one). In such a case,
                    # we need to work extra hard to help the user understand
                    # what's wrong.  Flatten the traceback to a single
                    # comma-free line for the report.
                    txt = txt.replace('\r', '').replace('\n', '; ').replace(',', ' ')
                    state.reason = 'exception in build process itself: ' + txt
                    if not state.timestamps:
                        state.timestamps.append(time.time())
                        state.phase = EvalPhase.UPDATE
            finally:
                # Best-effort email notification, driven by a notify.txt file
                # that contains the recipient addresses.
                if os.path.exists(os.path.join(sb.get_root(), 'notify.txt')):
                    if (not sb.get_sandboxtype().get_notify_on_success()) and (not state.err):
                        # Success, and this sandbox doesn't notify on
                        # success: just consume the file.
                        os.remove('%snotify.txt' % sb.get_root())
                    else:
                        notify = open('%snotify.txt' % sb.get_root(), 'r')
                        emails = notify.read()
                        notify.close()
                        body = ''
                        if os.path.exists(os.path.join(sb.get_root(), 'eval-log.txt')):
                            body = os.path.join(sb.get_root(), 'eval-log.txt')
                        os.remove('%snotify.txt' % sb.get_root())
                        bi = buildinfo.BuildInfo()
                        if state.err:
                            status = 'Failed'
                        else:
                            status = 'Succeeded'
                        subject = '%s build of %s on %s %s.' % (sb.get_variant(), sb.get_top_component(), bi.host, status)
                        # TODO KIM TO CONF — SMTP host/port are hard-coded.
                        arguments = '--to %s --sender sadm --subject "%s" --host smtp.example.com --port 587' % (emails, subject)
                        # TODO KIM TO CONF — credentials are hard-coded.
                        arguments += ' --username [email protected] --password password'
                        if body:
                            arguments += ' --body "%s"' % body
                        os.system('python %s/buildscripts/mailout.py %s' % (sb.get_code_root(), arguments))
    if not state.err:
        state.reason = ''
    if _should_report(sb, options):
        report(sb, state)
    else:
        print('Skipping report phase.')
    return state.err
def main(args):
    """User-facing build entry point for the Shaka Player project.

    Pipeline: generate localizations, run gendeps, style checks, docs,
    compile less for the UI (demo less-compile is disabled below), then
    build the library (with and without the UI) for each requested mode,
    and finally the apps.

    Args:
        args: list of command-line argument strings, parsed by argparse.

    Returns:
        0 on success, 1 on the first failing step.
    """
    parser = argparse.ArgumentParser(
        description='User facing build script for building the Shaka'
                    ' Player Project.')
    parser.add_argument(
        '--locales',
        type=str,
        nargs='+',
        default=generateLocalizations.DEFAULT_LOCALES,
        help='The list of locales to compile in (default %(default)r)')
    parser.add_argument(
        '--fix',
        help='Automatically fix style violations.',
        action='store_true')
    parser.add_argument(
        '--force',
        '-f',
        help='Force building the library even if no files have changed.',
        action='store_true')
    parser.add_argument(
        '--debug',
        help='Limit which build types to build. Will at least build the debug '
             'version.',
        action='store_true')
    parser.add_argument(
        '--release',
        help='Limit which build types to build. Will at least build the '
             'release version.',
        action='store_true')
    parsed_args = parser.parse_args(args)

    # Make the dist/ folder; ignore "already exists" errors.
    base = shakaBuildHelpers.get_source_base()
    try:
        os.mkdir(os.path.join(base, 'dist'))
    except OSError:
        pass

    # Generate localizations before running gendeps, so the output is
    # available to the deps system.
    # TODO(#1858): It might be time to look at a third-party build system.
    localizations = compiler.GenerateLocalizations(parsed_args.locales)
    if not localizations.generate(parsed_args.force):
        return 1

    if gendeps.main([]) != 0:
        return 1

    check_args = []
    if parsed_args.fix:
        check_args += ['--fix']
    if parsed_args.force:
        check_args += ['--force']
    if check.main(check_args) != 0:
        return 1

    docs_args = []
    if parsed_args.force:
        docs_args += ['--force']
    if docs.main(docs_args) != 0:
        return 1

    # (Fixed: removed a stray trailing semicolon after this return.)
    if not compile_less('ui', 'controls', parsed_args):
        return 1
    # NOTE(review): demo less compilation was deliberately disabled in the
    # original; preserved as-is.
    # if not compile_less('demo', 'demo', parsed_args):
    #     return 1

    build_args_with_ui = ['--name', 'ui', '+@complete']
    build_args_with_ui += ['--locales'] + parsed_args.locales
    build_args_without_ui = ['--name', 'compiled', '+@complete', '-@ui']

    if parsed_args.force:
        build_args_with_ui += ['--force']
        build_args_without_ui += ['--force']

    # Create the list of build modes to build with.  If neither --debug nor
    # --release was given, build every mode.
    modes = []
    if parsed_args.debug:
        modes.append('debug')
    if parsed_args.release:
        modes.append('release')
    if not modes:
        modes = ['debug', 'release']

    for mode in modes:
        # The complete build includes the UI library, but it is optional and
        # the player lib should build and work without it as well.  First
        # build the full build (UI included), then build excluding UI.
        for build_args in [build_args_with_ui, build_args_without_ui]:
            if build.main(build_args + ['--mode', mode]) != 0:
                return 1

        is_debug = mode == 'debug'
        if not apps.build_all(parsed_args.force, is_debug):
            return 1

    return 0
vcs_args=['git', '--git-dir', '%(root)s/.git', 'describe', '--tags', '--long', '--abbrev=999'], ) def pad_version(v): split = v.split('.') return '.'.join(split + ['0'] * (3 - len(split))) version = '.'.join(( pad_version(os.environ['PYQT5_VERSION']), version.version, )) sys.stderr.write('another stderr test from {}\n'.format(__file__)) results = build.main() with open('README.rst') as f: readme = f.read() setuptools.setup( name="pyqt5-tools", description="Tools to supplement the official PyQt5 wheels", long_description=readme, long_description_content_type='text/x-rst', url='https://github.com/altendky/pyqt5-tools', author="Kyle Altendorf", author_email='*****@*****.**', license='GPLv3', classifiers=[ # complete classifier list: https://pypi.org/pypi?%3Aaction=list_classifiers
def build(request):
    """Django view: build a bot executable from the posted feature form.

    Parses the POST body into a feature list, invokes the builder
    (`main`), registers the bot's capability list with its botmaster,
    and returns a URL to the produced executable.

    Returns:
        HttpResponse: body '200 <url>' on success, status 503 otherwise.
    """
    if request.method == "POST":
        # The body arrives with surrounding noise; carve out the JSON object
        # interior and re-wrap it so json.loads accepts it.
        postData = re.split(r'\{\"(.+|\[+|\]+)\}', str(request.body))[1]
        postData = '{"' + postData + '}'
        jsonData = json.loads(postData)

        botIP = None
        botPort = None
        featList = {}
        # actionList will be used again when we send the list to the
        # BotMaster to record this Bot's capabilities.
        actionList = []

        if 'Execution Conditions' in jsonData:
            featList['Execution Conditions'] = []
            # gets a dict of all features in this category
            exeConds = jsonData['Execution Conditions']
            for f in exeConds:
                # str(f) keys a list of checked 'option' checkbox values;
                # 'Yes' means the feature was selected.
                for option in exeConds[str(f)]:
                    if option == "Yes":
                        featList['Execution Conditions'].append(str(f))

        if 'Options' in jsonData:
            featList['Options'] = []
            # gets a dict of all features in this category
            opts = jsonData['Options']
            for o in opts:
                for option in opts[str(o)]:
                    if option == "Yes":
                        featList['Options'].append(str(o))
                        # Also advertise the option (e.g. 'Encode HTTP')
                        # to the botmaster as a capability.
                        actionList.append(str(o))

        if 'Executable Features' in jsonData:
            featList['On Execution'] = []
            featList['As Remote Action'] = []
            # gets a dict of all features in this category
            exeFeats = jsonData['Executable Features']
            for f in exeFeats:
                for option in exeFeats[str(f)]:
                    # Every checked option makes this feature an action the
                    # botmaster can send; the option value then decides which
                    # timing bucket(s) the feature lands in.
                    actionList.append(str(f))
                    if option == "On Execution":
                        featList['On Execution'].append(str(f))
                    if option == "As Remote Action":
                        featList['As Remote Action'].append(str(f))

        if 'Action Sequence' in jsonData:
            featList['Action Sequence'] = []
            # gets a dict of all features in this category
            actionSeq = jsonData['Action Sequence']
            for f in actionSeq:
                for option in actionSeq[str(f)]:
                    # add this action to the list of actions the botmaster
                    # can send
                    actionList.append(str(f))
                    if option == "Yes":
                        featList['Action Sequence'].append(str(f))

        if 'Failed Condition Handler' in jsonData:
            featList['Failed Condition Handler'] = []
            # gets a dict of all features in this category
            failedHandler = jsonData['Failed Condition Handler']
            for o in failedHandler:
                for option in failedHandler[str(o)]:
                    if option == "Yes":
                        # BUG FIX: this previously appended to
                        # featList['Options'] — the wrong bucket, which also
                        # raised KeyError when 'Options' was not posted.
                        featList['Failed Condition Handler'].append(str(o))
                        actionList.append(str(o))

        if 'Pull Times' in jsonData:
            featList['Pull Times'] = []
            times = jsonData['Pull Times']
            for t in times:
                featList['Pull Times'].append(str(t))

        if 'Action Limit' in jsonData:
            featList['Action Limit'] = jsonData['Action Limit']
        if 'Action Frequency' in jsonData:
            featList['Action Frequency'] = jsonData['Action Frequency']
        if 'Maximum Action Limit' in jsonData:
            featList['Maximum Action Limit'] = jsonData['Maximum Action Limit']
        if 'Botmaster IP' in jsonData:
            featList['Botmaster IP'] = jsonData['Botmaster IP']
            botIP = jsonData['Botmaster IP']
        if 'Botmaster Port' in jsonData:
            featList['Botmaster Port'] = jsonData['Botmaster Port']
            botPort = jsonData['Botmaster Port']

        exePath, botID = main(featList)
        # main() returns -1 on failure, otherwise the path to the exe.
        if exePath != -1:
            exePath = re.sub(r'\\', '/', exePath)
            filename = 'http://' + request.get_host() + '/' + exePath
            # send POST to the BotMaster with the bot's ID and a list of its
            # available add_func_definitions
            success = sendBotActionList(botIP, botPort, actionList, botID)
            # NOTE(review): the original deliberately bypassed the success
            # check ("if success == True:" was replaced with "if True:");
            # preserved here — confirm whether the response should depend on
            # sendBotActionList succeeding.
            # if success == True:
            if True:
                return HttpResponse('200 ' + filename)

    # BUG FIX: HttpResponse(503) set the *body* to "503" with HTTP status
    # 200; pass it as the status code instead.  Non-POST requests now also
    # receive this 503 instead of an implicit None.
    return HttpResponse(status=503)
# Our libs import config import build from basehandler import BaseHandler class MainHandler(BaseHandler): def get(self): context = {'message': 'Hello World'} self.render('index.html', context=context) def get_handlers(): return [ (r"/", MainHandler), ] if __name__ == "__main__": parser = OptionParser() parser.add_option('-p', '--production', action='store_true', dest='production', default=False) (options, args) = parser.parse_args() build.main(debug=not options.production) settings = config.tornado_settings application = tornado.web.Application(get_handlers(), **settings) application.debug = not options.production server = HTTPServer(application) sockets = bind_sockets(int(config.application_port), address=config.application_ip) tornado.process.fork_processes(1 if not options.production else 0) server.add_sockets(sockets) IOLoop.instance().start()
"texture", "texture3d", "texturearray", "texturecubemap", "texturemipmapgen", "triangle", "viewportarray", "vulkanscene" ] COLOR_GREEN = '\033[92m' COLOR_END = '\033[0m' CURR_INDEX = 0 parser = argparse.ArgumentParser() parser.add_argument('-deploy', default=False, action='store_true', help="install examples on device") parser.add_argument('-validation', default=False, action='store_true') args = parser.parse_args() print("Building all examples...") for example in EXAMPLES: print(COLOR_GREEN + "Building %s (%d/%d)" % (example, CURR_INDEX, len(EXAMPLES)) + COLOR_END) if not build.main(example, args.deploy, args.validation): print("Error during build process for %s" % example) sys.exit(-1) CURR_INDEX += 1 print("Successfully build %d examples" % CURR_INDEX)