def collect_job():
    """Gather host metrics (OS, load, disks, network, processes, uptime)
    and POST them as JSON to the configured collect endpoint.

    Reads the enabled disks/interfaces from the config file; silently
    returns on connection errors (Python 2 code).
    """
    config = utils.get_config()
    disks = config[utils.DISK_SECTION]
    interfaces = config[utils.INET_SECTION]
    account = Account(config[utils.GENERAL_SECTION].get('email'),
                      config[utils.GENERAL_SECTION].get('user_key'),
                      config[utils.GENERAL_SECTION].get('api_key'))
    report = {}
    usage = {}
    net = {}
    if os.name == 'nt':
        # Windows: e.g. "Windows-7 SP1"; getloadavg() is unavailable here.
        report['os'] = platform.system()+"-"+platform.win32_ver()[0]+" "+platform.win32_ver()[2]
        report['arch'] = platform.architecture()[0]
    else:
        report['loadAverage'] = {}
    if not os.name == 'nt':
        for idx, la in enumerate(os.getloadavg()):
            # NOTE(review): idx == 2 maps to "5" and idx == 1 falls through
            # to "15" -- the 5/15-minute labels look swapped; confirm intent.
            time_la = "1" if idx == 0 else "5" if idx == 2 else "15"
            report['loadAverage'][time_la] = "{0:.2f}".format(la)
        if platform.system() == 'Linux':
            report['os'] = platform.linux_distribution()[0]+"-"+platform.linux_distribution()[1]+" "+platform.linux_distribution()[2]
            report['arch'] = platform.architecture()[0]
        else:
            report['os'] = "Mac OS X - "+platform.mac_ver()[0]
            report['arch'] = platform.architecture()[0]
    # Disk usage for every enabled, present disk.
    for disk in disks.keys():
        if disks[disk] == utils.ENABLED and check_disk(disk):
            usage_temp = psutil.disk_usage(disk)
            usage[disk] = {'total': usage_temp.total, 'used': usage_temp.used,
                           'free': usage_temp.free, 'percentage': usage_temp.percent}
    # Byte counters for every enabled interface (keys normalised to lowercase).
    for interf in interfaces.keys():
        if interfaces[interf] == utils.ENABLED:
            net_temp = dict((k.lower(), v) for k, v in psutil.net_io_counters(pernic=True).iteritems())[interf]
            net[interf] = {'sent': net_temp.bytes_sent, 'recv': net_temp.bytes_recv}
    report['inet'] = net
    report['disks'] = usage
    report['processes'] = {'value': len(psutil.pids())}
    # NOTE(review): this recomputes loadAverage a second time, overwriting
    # the values collected above -- likely redundant.
    report['loadAverage'] = {}
    if not os.name == 'nt':
        for idx, la in enumerate(os.getloadavg()):
            time_la = "1" if idx == 0 else "5" if idx == 2 else "15"
            report['loadAverage'][time_la] = "{0:.2f}".format(la)
    report['users'] = {'value': len(psutil.users())}
    # Human-readable uptime, microseconds stripped.
    report['uptime'] = str(datetime.now() - datetime.fromtimestamp(psutil.boot_time())).split('.')[0]
    report['kindDevice'] = 3
    api_key = account.api_key
    url = "%s/%s" % (system_config['ROUTES'].get('collect'), config[utils.GENERAL_SECTION].get('serial'))
    params = {'apiKey': api_key, 'data': json.dumps(report)}
    try:
        response = http.request('POST', url, params, {'user-key': account.user_key}, encode_multipart=False)
    except Exception, e:
        console.error("Check your connection")
        return
def authenticate(serial=None, email=None, password=None):
    """Prompt for any missing credentials and authenticate against the
    auth route.

    Returns {'serial': ..., 'account': Account(...)} on HTTP 200,
    otherwise None (Python 2 code).
    """
    serial = raw_input("Serial:") if not serial else serial
    email = raw_input("E-mail:") if not email else email
    # NOTE(review): the next statement is corrupted in the source (text was
    # redacted where the `******` run appears); it presumably read
    # `getpass.getpass("Password:") if not password else password` followed
    # by a credential-validation `if` that emits the error message below.
    password = getpass.getpass("Password:"******"Serial, E-mail or Password Invalid!")
    else:
        http = urllib3.PoolManager()
        url = system_config['ROUTES'].get('auth')
        params = {'email': email, 'password': password}
        account = Account
        response = {}
        try:
            response = http.request('POST', url, params, encode_multipart=False)
        except Exception, e:
            console.error("Check your connection", exc_info=True)
            return None
        if response.status == 200:
            data = json.loads(response.data)
            # user-key comes back as a response header, the rest in the body.
            return {'serial': serial,
                    'account': account(data['email'], response.getheaders()['user-key'], data['apiKey'])}
        else:
            console.error(response.data, True)
            return None
def run(self):
    """Run all the cc_test target programs.

    Schedules every (dynamic_)cc_test target that is due to run (full run,
    incremental-changed, or marked always_run), then prints a summary.
    Returns 1 if any test failed, 0 otherwise (Python 2 code).
    """
    failed_targets = []
    self._generate_inctest_run_list()
    tests_run_list = []
    for target in self.targets.values():
        if not (target['type'] == 'cc_test' or target['type'] == 'dynamic_cc_test'):
            continue
        # Incremental mode: skip unchanged tests unless forced by always_run.
        if (not self.run_all_reason) and target not in self.inctest_run_list:
            if not target.get('options', {}).get('always_run', False):
                self.skipped_tests.append((target['path'], target['name']))
                continue
        self._prepare_env(target)
        cmd = [os.path.abspath(self._executable(target))]
        cmd += self.options.args
        sys.stdout.flush()  # make sure output before scons if redirected
        test_env = dict(os.environ)
        environ_add_path(test_env, 'LD_LIBRARY_PATH', self._runfiles_dir(target))
        if console.color_enabled:
            test_env['GTEST_COLOR'] = 'yes'
        else:
            test_env['GTEST_COLOR'] = 'no'
        test_env['GTEST_OUTPUT'] = 'xml'
        # Empty HEAPCHECK disables tcmalloc heap checking.
        test_env['HEAPCHECK'] = target.get('options', {}).get('heap_check', '')
        tests_run_list.append((target, self._runfiles_dir(target), test_env, cmd))
    concurrent_jobs = 0
    concurrent_jobs = self.options.test_jobs
    scheduler = TestScheduler(tests_run_list, concurrent_jobs, self.tests_run_map)
    scheduler.schedule_jobs()
    self._clean_env()
    console.info("%s Testing Summary %s" % (self.title_str, self.title_str))
    console.info("Run %d test targets" % scheduler.num_of_run_tests)
    failed_targets = scheduler.failed_targets
    if failed_targets:
        console.error("%d tests failed:" % len(failed_targets))
        for i in failed_targets:
            print "%s/%s, exit code: %s" % (
                i["path"], i["name"], i["test_exit_code"])
            test_file_name = os.path.abspath(self._executable(i))
            # Do not skip failed test by default
            if test_file_name in self.test_stamp['md5']:
                self.test_stamp['md5'][test_file_name] = (0, 0)
        console.info("%d tests passed" % (
            scheduler.num_of_run_tests - len(failed_targets)))
        self._finish_tests()
        return 1
    else:
        console.info("All tests passed!")
        self._finish_tests()
        return 0
def run(args, timeout=None, stdout=None, exit_on_error=True):
    """Run *args* as a subprocess, streaming stderr through a listener thread.

    Args:
        args: argv list; args[0] is the program name used in messages.
        timeout: seconds to wait before killing the process, or None to
            wait indefinitely.
        stdout: passed straight through to subprocess.Popen.
        exit_on_error: raise ProletarianBuildException on timeout or
            non-zero exit instead of just logging.

    Raises:
        error.ProletarianBuildException: on timeout or non-zero exit when
            exit_on_error is true.
    """
    console.message("Running task: %s" % " ".join(args))
    p = subprocess.Popen(args, stdout=stdout, stderr=subprocess.PIPE, bufsize=1)
    thread = _attach_stderr_listener(p)
    if timeout is not None:
        thread.join(timeout)
        p.poll()
        if p.returncode is None:
            # Still running after the timeout window: kill it.
            p.kill()
            if exit_on_error:
                raise error.ProletarianBuildException("Process '%s' timed out after %s seconds" % (args[0], timeout))
            else:
                console.error("Process '%s' failed to finish in the specified time (%s seconds)" % (args[0], timeout))
    else:
        thread.join()
        # Uses a busy waiting loop instead of communicate() or wait() because
        # this should not happen frequently (i.e. the thread should join when
        # the process terminates) and it has less risk of starvation.
        # Bug fix: the loop previously tested `returncode != 0`, which spins
        # forever when the process exits non-zero; poll() leaves returncode
        # as None only while the process is still running.
        while p.returncode is None:
            time.sleep(0.1)
            p.poll()
    if exit_on_error and p.returncode != 0:
        raise error.ProletarianBuildException("Process '%s' had a non-zero return code (%s)" % (args[0], p.returncode))
def __dogit(packpath, dest, version = ""):
    """Clone or update a git checkout of *packpath* into *dest*, optionally
    checking out *version* afterwards.  Shells out via puke.sh.
    """
    # Require git on the system to have it
    puke.System.check_package('git')
    # If directory exist, then update the tree
    if puke.FileSystem.exists(dest):
        console.info('Updating')
        # Throw away any local modifications before pulling.
        that = 'cd %s; git stash; git stash drop' % dest
        puke.sh(that, output=True)
        that = 'cd %s; git checkout master; git pull --rebase; ' % dest
    else:
        if not puke.FileSystem.exists(puke.FileSystem.dirname(dest)):
            puke.FileSystem.makedir(puke.FileSystem.dirname(dest))
        console.info('Cloning')
        that = 'cd %s; git clone %s; ' % (puke.FileSystem.dirname(dest), packpath)
    if version:
        # Append the checkout so it runs in the same shell invocation.
        that = "%s git checkout %s" % (that, version)
    # Do the deed
    try:
        std = puke.Std()
        puke.sh(that, std=std, output=True)
        if std.err:
            raise std.err
    except:
        # if puke.FileSystem.exists(dest):
        #     puke.FileSystem.remove(dest)
        console.error('Git operation failed! %s You need to manually fix or remove the directory.' % std.err)
def exist(rpname):
    """Check if a port exists, note rpname is repo/port"""
    portbuild_path = core.config.dir_tree + rpname + '/PORTBUILD'
    if os.path.exists(portbuild_path):
        return True
    # No PORTBUILD file means the port is unknown: report and stop.
    console.error('Port ' + rpname + ' not found')
    exit()
def _run_job_redirect(self, job, job_thread):
    """run job and redirect the output.

    Runs one test (target, run_dir, test_env, cmd), captures its combined
    stdout/stderr, and reports it through the console.  Returns the
    process exit code.
    """
    target, run_dir, test_env, cmd = job
    test_name = target.fullname
    shell = target.data.get('run_in_shell', False)
    if shell:
        # Shell mode needs a single command string, not an argv list.
        cmd = subprocess.list2cmdline(cmd)
    timeout = target.data.get('test_timeout')
    self._show_progress(cmd)
    p = subprocess.Popen(cmd, env=test_env, cwd=run_dir,
                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                         close_fds=True, shell=shell)
    # Register the process with its worker thread so timeouts can kill it.
    job_thread.set_job_data(p, test_name, timeout)
    stdout = p.communicate()[0]
    result = self._get_result(p.returncode)
    msg = 'Output of %s:\n%s\n%s finished: %s\n' % (
        test_name, stdout, test_name, result)
    if console.verbosity_le('quiet') and p.returncode != 0:
        # In quiet mode only failing tests get their output surfaced.
        console.error(msg, prefix=False)
    else:
        console.info(msg)
    console.flush()
    return p.returncode
def _check_code_style(opened_files):
    """Run cpplint over *opened_files* and warn about style problems.

    Returns 0 when there is nothing to do or cpplint is disabled, 1 on
    KeyboardInterrupt; otherwise falls off the end (returns None) even
    when cpplint reported warnings (Python 2 code).
    """
    if not opened_files:
        return 0
    cpplint = configparse.blade_config.configs["cc_config"]["cpplint"]
    if not cpplint:
        console.info("cpplint disabled")
        return 0
    console.info("Begin to check code style for changed source code")
    p = subprocess.Popen(("%s %s" % (cpplint, " ".join(opened_files))), shell=True)
    try:
        p.wait()
        if p.returncode:
            if p.returncode == 127:
                # Shell convention: exit 127 means "command not found".
                msg = (
                    "Can't execute '{0}' to check style, you can config the "
                    "'cpplint' option to be a valid cpplint path in the "
                    "'cc_config' section of blade.conf or BLADE_ROOT, or "
                    "make sure '{0}' command is correct."
                ).format(cpplint)
            else:
                msg = "Please fixing style warnings before submitting the code!"
            console.warning(msg)
    except KeyboardInterrupt, e:
        console.error(str(e))
        return 1
def make_doc(source_path, doc_path, packages):
    """Generate documentation for every package and an index page.

    Args:
        source_path: root directory that the package paths are relative to.
        doc_path: output directory; created when missing.
        packages: mapping of relative path -> package object with a `.name`.

    Bails out early (returning None) when DOC_PROGRAM is not on the path.
    """
    if not common.which(DOC_PROGRAM):
        console.error('[' + DOC_PROGRAM + '] is not available.')
        console.error('Please make sure [' + DOC_PROGRAM + '] is in your python path')
        return
    if not os.path.exists(doc_path):
        os.mkdir(doc_path)
    # List up packages with its absolute path.
    # Consistency fix: use .items() -- the rest of this function already
    # does, and dict.iteritems() does not exist on Python 3.
    packages_by_name = {p.name: source_path + '/' + path
                        for path, p in packages.items()}
    doc_output = {}
    console.pretty_println('Generating documents in ' + doc_path, console.cyan)
    for name, path in packages_by_name.items():
        console.pretty_println(' ' + name)
        output = generate_doc(name, path, doc_path)
        doc_output[name] = output
    generates_index_page(doc_path, packages_by_name.keys())
    console.pretty_println('')
    console.pretty_println('Document generation result. 0 may mean error. But it is fine most of time', console.bold_white)
    for name, err in doc_output.items():
        console.pretty_print(name, console.cyan)
        console.pretty_print(' : ')
        console.pretty_println(str(err))
def run_code_or_fail(processes):
    """Join every still-running process, then abort if any exited non-zero."""
    still_alive = [proc for proc in processes if proc.is_alive()]
    for proc in still_alive:
        proc.join()
    # All processes have finished by now; inspect their exit codes.
    for proc in processes:
        if proc.exitcode != 0:
            console.error("Process had a non-zero return code (%s)" % proc.exitcode, exit=True)
def create(self, *args, **kwargs):
    """Build the game-over screen, recording the score when one was passed."""
    if 'score' in kwargs:
        self.score = kwargs['score']
    else:
        # Caller forgot to pass a score -- nothing gets recorded this game.
        console.error('Score not sent to GameOverScreen. No stats recorded.')
    self.log_score()
    self.create_ui()
def set_var(var_name, value=None):
    """Create a variable or overwrite its current value.

    Typing is dynamic: an int may later become a string, for example.
    """
    if not isinstance(var_name, str):
        console.error(exception.declare_var_invalid_type)
        return
    dict_variables[var_name] = value
def del_var(var_name):
    """Delete a previously declared variable by name."""
    if not isinstance(var_name, str):
        console.error(exception.declare_var_invalid_type)
        return
    if var_name in dict_variables:
        del dict_variables[var_name]
    else:
        # Deleting something that was never declared is an error.
        console.error(exception.get_var_not_exists)
def update_config(self, section_name, append, user_config):
    """update config section by name.

    Applies *append* (additive items) and then *user_config* (replacements)
    to the named section; logs an error for unknown sections.
    """
    section = self.configs.get(section_name, {})
    if not section:
        console.error("%s: %s: unknown config section name" % (
            self.current_file_name, section_name))
        return
    if append:
        self._append_config(section_name, section, append)
    self._replace_config(section_name, section, user_config)
def distro_version():
    """Return the active ROS distribution name, or exit(1) if unset.

    This code is pulled from rosversion, which unfortunately has it
    buried inside the script, not in the python module.
    """
    if 'ROS_DISTRO' not in os.environ:
        console.error("could not determine the rosdistro")
        sys.exit(1)
    return os.environ['ROS_DISTRO']
def require_root():
    # This cygwin is for initial run before config file exists only
    cygwin = getattr(config, 'cygwin', True)
    if cygwin == False:
        # Cygwin has no meaningful euid; only enforce root elsewhere.
        if os.geteuid() != 0:
            console.error("This section of manup must be run as root")
            exit()
def __init__(self, name):
    """Load package metadata from technicolor/<name>.yaml.

    Populates name/license/version/url/checksum/local/production plus the
    hack/depends/toolchain lists; packages without a URL are dummies.
    Fails hard (console.fail) when the yaml file is missing.
    """
    yawn = puke.FileSystem.join('technicolor', '%s.yaml' % name)
    if not puke.FileSystem.exists(yawn):
        console.fail('The requested yawn (%s, at path %s) doesn\'t exist!' % (name, yawn))
    data = yaml.load(puke.FileSystem.readfile(yawn))
    self.name = name
    # Extend this so that multiple targets can be supported
    self.done = {}
    if 'LICENSE' in data:
        self.license = data['LICENSE']
    else:
        console.error('We are missing license information!')
        self.license = 'UNKNOWN'
    if 'VERSION' in data:
        self.version = data['VERSION']
    else:
        console.warn('We are missing version information')
        self.version = 'No version specified!'
    if 'URL' in data:
        self.url = data['URL'].replace('{VERSION}', self.version)
        self.__checksum = data['CHECKSUM']
        if 'LOCAL' in data:
            self.local = data['LOCAL'].replace('{VERSION}', self.version)
        else:
            # Bug fix: Python lists have no .shift() (a JavaScript idiom);
            # take the URL basename's first dot-separated component instead.
            self.local = self.url.split('/').pop().split('.')[0]
        self.production = data['PRODUCTION']
    else:
        console.info('This is a dummy package')
        self.url = ''
        self.__checksum = ''
        self.local = ''
        self.production = ''
    self.hack = False
    if 'HACK' in data:
        self.hack = data['HACK']
    if 'DEPENDS' in data:
        self.depends = data['DEPENDS']
        # Normalise a single dependency string to a one-element list.
        if isinstance(self.depends, str) and self.depends:
            self.depends = [self.depends]
    else:
        self.depends = []
    if 'TOOLCHAIN' in data:
        self.toolchain = data['TOOLCHAIN']
        if isinstance(self.toolchain, str) and self.toolchain:
            self.toolchain = [self.toolchain]
    else:
        self.toolchain = []
def run(self):
    """Run all the cc_test target programs.

    Schedules every cc_test target due to run (full run, incrementally
    changed, or marked always_run) and prints a summary.  Returns 1 if
    any test failed, 0 otherwise (Python 2 code).
    """
    failed_targets = []
    self._generate_inctest_run_list()
    tests_run_list = []
    for target in self.targets.values():
        if target.type != "cc_test":
            continue
        # Incremental mode: skip unchanged tests unless forced by always_run.
        if (not self.run_all_reason) and target not in self.inctest_run_list:
            if not target.data.get("always_run"):
                self.skipped_tests.append((target.path, target.name))
                continue
        self._prepare_env(target)
        cmd = [os.path.abspath(self._executable(target))]
        cmd += self.options.args
        sys.stdout.flush()  # make sure output before scons if redirected
        test_env = dict(os.environ)
        environ_add_path(test_env, "LD_LIBRARY_PATH", self._runfiles_dir(target))
        if console.color_enabled:
            test_env["GTEST_COLOR"] = "yes"
        else:
            test_env["GTEST_COLOR"] = "no"
        test_env["GTEST_OUTPUT"] = "xml"
        # Empty HEAPCHECK disables tcmalloc heap checking.
        test_env["HEAPCHECK"] = target.data.get("heap_check", "")
        tests_run_list.append((target, self._runfiles_dir(target), test_env, cmd))
    concurrent_jobs = 0
    concurrent_jobs = self.options.test_jobs
    scheduler = TestScheduler(tests_run_list, concurrent_jobs, self.tests_run_map)
    scheduler.schedule_jobs()
    self._clean_env()
    console.info("%s Testing Summary %s" % (self.title_str, self.title_str))
    console.info("Run %d test targets" % scheduler.num_of_run_tests)
    failed_targets = scheduler.failed_targets
    if failed_targets:
        console.error("%d tests failed:" % len(failed_targets))
        for target in failed_targets:
            print "%s:%s, exit code: %s" % (target.path, target.name, target.data["test_exit_code"])
            test_file_name = os.path.abspath(self._executable(target))
            # Do not skip failed test by default
            if test_file_name in self.test_stamp["md5"]:
                self.test_stamp["md5"][test_file_name] = (0, 0)
        console.info("%d tests passed" % (scheduler.num_of_run_tests - len(failed_targets)))
        self._finish_tests()
        return 1
    else:
        console.info("All tests passed!")
        self._finish_tests()
        return 0
def create_pid_file(self):
    """Write the current PID to guardiancl.pid, refusing to start when a
    previous instance is still alive.

    Exits with status 2 if the recorded PID belongs to a live process;
    otherwise (over)writes the pid file with our own PID.
    """
    pid = str(os.getpid())
    pid_file = self.get_file("guardiancl.pid")
    if os.path.exists(pid_file):
        # A stale file may belong to a finished run; only bail out when
        # the recorded PID is still a live process.
        with open(pid_file) as f:  # fix: previously leaked the handle
            pid_old = int(f.read())
        if psutil.pid_exists(pid_old):
            console.error("Script already running!", True)
            sys.exit(2)
    # Either no pid file existed or its owner is gone: write ours.
    # (The original duplicated this write in both if/else branches.)
    with open(pid_file, 'w') as target:
        target.write(pid)
def get_configuration_from_file(filename):
    """Merge the user rc file at *filename* over the defaults and compile it.

    Returns the compiled configuration, or None when the file is missing.
    """
    try:
        defaults = get_defaultrc_json()
        user_rc = open_json(filename)
        merged = jsonmerge.merge(defaults, user_rc)
        validate_configuration(merged)
        return compile_configuration(merged)
    except FileNotFoundError as e:
        console.error(e.__doc__)
        return None
def build(options):
    """Check style of changed files, then build with ninja or scons.

    Returns the builder's exit code, or 1 when post-build verification fails.
    """
    _check_code_style(_TARGETS)
    console.info('building...')
    console.flush()
    use_ninja = config.get_item('global_config', 'native_builder') == 'ninja'
    returncode = _ninja_build(options) if use_ninja else _scons_build(options)
    if returncode != 0:
        console.error('building failure.')
        return returncode
    if not blade.blade.verify():
        console.error('building failure.')
        return 1
    console.info('building done.')
    return 0
def _append_config(self, section_name, config, append):
    """Append config section items

    Adds the values in *append* (a dict) onto the matching list-valued
    entries of *config*; warns about unknown keys and non-list targets.
    """
    if not isinstance(append, dict):
        console.error("%s: %s: append must be a dict" % (self.current_file_name, section_name))
        return
    # Iterate the dict directly instead of copying via .keys(); guard
    # clauses keep the nesting flat (matches the sibling implementation).
    for k in append:
        if k not in config:
            console.warning("%s: %s: unknown config item name: %s" % (self.current_file_name, section_name, k))
            continue
        if isinstance(config[k], list):
            config[k] += var_to_list(append[k])
        else:
            console.warning(
                "%s: %s: config item %s is not a list" % (self.current_file_name, section_name, k)
            )
def _append_config(self, section_name, config, append):
    """Append config section items"""
    if not isinstance(append, dict):
        console.error('%s: %s: append must be a dict' % (self.current_file_name, section_name))
        return
    for key in append:
        # Only known, list-valued items can be appended to.
        if key not in config:
            console.warning('%s: %s: unknown config item name: %s' % (self.current_file_name, section_name, key))
        elif isinstance(config[key], list):
            config[key] += var_to_list(append[key])
        else:
            console.warning('%s: %s: config item %s is not a list' % (self.current_file_name, section_name, key))
def _build(options): if options.scons_only: return 0 scons_options = '--duplicate=soft-copy --cache-show' scons_options += " -j %s %s" % ( options.jobs, '-k' if options.keep_going else '') p = subprocess.Popen("scons %s" % scons_options, shell=True) try: p.wait() if p.returncode: console.error("building failure") return p.returncode except: # KeyboardInterrupt return 1 return 0
def _process_job(self, job, redirect, job_thread):
    """process routine.

    Each test is a tuple (target, run_dir, env, cmd)
    """
    # Only the target is inspected here; run_dir/env/cmd are consumed by
    # the _run_job* helpers.
    target = job[0]
    start_time = time.time()
    # NOTE(review): start_time is never read in this visible span -- the
    # block appears truncated (cost-time accounting likely follows).
    try:
        if redirect:
            # Capture the child's output and replay it via the console.
            returncode = self._run_job_redirect(job, job_thread)
        else:
            returncode = self._run_job(job, job_thread)
    except OSError, e:
        # Typically the test executable is missing or not runnable.
        console.error('%s: Create test process error: %s' % (
            target.fullname, str(e)))
        returncode = 255
def check_job_timeout(self, now):
    """Check whether the job is timeout or not.

    This method simply checks job timeout and returns immediately.
    The caller should invoke this method repeatedly so that a job
    which takes a very long time would be timeout sooner or later.
    """
    try:
        # The lock guards the job_* fields shared with the worker thread.
        self.job_lock.acquire()
        if (not self.job_is_timeout and self.job_start_time and
                self.job_timeout is not None and self.job_name and
                self.job_process is not None):
            if self.job_start_time + self.job_timeout < now:
                # Mark first so a racing check doesn't terminate twice.
                self.job_is_timeout = True
                console.error('%s: TIMEOUT\n' % self.job_name)
                self.job_process.terminate()
    finally:
        self.job_lock.release()
def _build(options): if options.scons_only: return 0 scons_options = '--duplicate=soft-copy --cache-show' scons_options += ' -j %s' % options.jobs if options.keep_going: scons_options += ' -k' p = subprocess.Popen('scons %s' % scons_options, shell=True) try: p.wait() if p.returncode: console.error('building failure') return p.returncode except: # KeyboardInterrupt return 1 return 0
def step_download(pb):
    """Download the source archive for port *pb* into the repo cache,
    verify its md5 against PORTBUILD, and create the tmp build dir.

    Exits the process on checksum mismatch or download failure.
    """
    cachedir = core.config.dir_cache + pb.repo
    cachefile = core.config.dir_cache + pb.repo + '/' + pb.filename
    firsttime = False
    if not os.path.exists(cachedir):
        os.mkdir(core.config.dir_cache + pb.repo)
    if not os.path.exists(cachefile):
        # Port source not found in cache, download it
        console.line('Downloading ' + pb.fileurl)
        #port.pbrun(pb, core.config.cmd_down + ' ' + pb.fileurl + ' -P ' + cachedir)
        port.pbrun(pb, core.config.cmd_down.replace('$source', pb.fileurl).replace('$dest', cachedir))
        firsttime = True
    # Check cached source md5sum and re-download if changed
    proc = subprocess.Popen(core.config.cmd_md5.replace('$file', cachefile),
                            universal_newlines=True, executable='bash',
                            shell=True, stdout=subprocess.PIPE).stdout
    cachefile_md5 = proc.read().strip()
    # if PORTBUILD md5sum is blank must download everytime
    if pb.md5sum != '' and pb.md5sum == cachefile_md5:
        if not firsttime:
            console.line('Local cached version of ' + pb.filename + ' found')
    else:
        if not firsttime:
            console.line('Downloading ' + pb.filename)
        # Drop the stale/corrupt cached copy and fetch again.
        os.system('/bin/rm -rf ' + cachefile)
        #port.pbrun(pb, core.config.cmd_down + ' ' + pb.fileurl + ' -P ' + cachedir)
        port.pbrun(pb, core.config.cmd_down.replace('$source', pb.fileurl).replace('$dest', cachedir))
        # Compare md5 again
        proc = subprocess.Popen(core.config.cmd_md5.replace('$file', cachefile),
                                universal_newlines=True, executable='bash',
                                shell=True, stdout=subprocess.PIPE).stdout
        cachefile_md5 = proc.read().strip()
        if pb.md5sum != '' and pb.md5sum != cachefile_md5:
            console.error("File failed to pass md5 check sum. File corrupt or Portbuild and source file md5 differ")
            exit()
    if not os.path.exists(cachefile):
        console.error("Could not download " + pb.filename + " check PORTBUILD file")
        exit()
    # Make tmp build dir
    if not os.path.exists(core.config.dir_build + pb.repo + '/' + pb.name):
        os.system('/bin/mkdir -p ' + core.config.dir_build + pb.repo + '/' + pb.name)
def run(self):
    """Run all the test target programs.

    Builds the per-test environment (gtest, heap-check, pprof, coverage),
    schedules them, then records and displays the results.  Returns 0
    when every scheduled test passed, 1 otherwise.
    """
    self._collect_test_jobs()
    tests_run_list = []
    for target_key in self.test_jobs:
        target = self.target_database[target_key]
        test_env = self._prepare_env(target)
        cmd = [os.path.abspath(self._executable(target))]
        cmd += self.options.args
        if console.color_enabled():
            test_env['GTEST_COLOR'] = 'yes'
        else:
            test_env['GTEST_COLOR'] = 'no'
        test_env['GTEST_OUTPUT'] = 'xml'
        # Empty HEAPCHECK disables tcmalloc heap checking.
        test_env['HEAPCHECK'] = target.data.get('heap_check', '')
        pprof_path = config.get_item('cc_test_config', 'pprof_path')
        if pprof_path:
            test_env['PPROF_PATH'] = os.path.abspath(pprof_path)
        if self.options.coverage:
            test_env['BLADE_COVERAGE'] = 'true'
        tests_run_list.append(
            (target, self._runfiles_dir(target), test_env, cmd))
    console.notice('%d tests to run' % len(tests_run_list))
    sys.stdout.flush()
    scheduler = TestScheduler(tests_run_list, self.options.test_jobs)
    try:
        scheduler.schedule_jobs()
    except KeyboardInterrupt:
        # Partial results below still get saved and reported.
        console.clear_progress_bar()
        console.error('KeyboardInterrupt, all tests stopped')
        console.flush()
    if self.options.coverage:
        self._generate_coverage_report()
    self._clean_env()
    passed_run_results, failed_run_results = scheduler.get_results()
    self._save_test_history(passed_run_results, failed_run_results)
    self._show_tests_result(passed_run_results, failed_run_results)
    return 0 if len(passed_run_results) == len(self.test_jobs) else 1
def _build(options): if options.scons_only: return 0 scons_options = '--duplicate=soft-copy --cache-show' scons_options += ' -j %s' % options.jobs if options.keep_going: scons_options += ' -k' scons_exe = os.environ.get('SCONS_EXE', 'scons') p = subprocess.Popen("%s %s" % (scons_exe, scons_options), shell=True) try: p.wait() if p.returncode: console.error("building failure") return p.returncode except: # KeyboardInterrupt return 1 return 0
def get_devices(account): http = urllib3.PoolManager() url = system_config['ROUTES'].get('devices') if account is not None: try: response = http.request('GET', url, None, {'user-key': account.user_key}) except Exception, e: console.error("Check your connection", exc_info=True) if response.status == 200: devices = [] for dev in json.loads(response.data): devices.append(Device(dev['serial'], dev['description'])) return devices else: data = json.loads(response.data) console.error(data['status']) return None
def 命令_添加物品(物品名称或ID, 数量):
    # Command: add item.  First argument is an item name OR numeric id,
    # second is the count.  (Python 2 / IronPython-flavored code: note the
    # .NET-style `str.Contains` call below -- presumably CLR strings.)
    id = 0
    count = 0
    # Accept a numeric id directly; otherwise look the name up.
    try:
        id = int(物品名称或ID)
    except:
        import config
        for itemType in config.ItemList:
            for item in config.ItemList[itemType]:
                if item.name.Contains(物品名称或ID):
                    id = item.id
                    break
    # The count must be numeric; complain (in-game colored text) otherwise.
    try:
        count = int(数量)
    except StandardError, e:
        console.error(
            "输入参数 <color=#EE7AE9>%s</color> 不是数字,请输入数字,例如 <color=#EE7AE9>100</color>" % 数量)
        return
def _check_thrift_srcs_name(self, srcs): """_check_thrift_srcs_name. Checks whether the thrift file's name ends with 'thrift'. """ error = 0 for src in srcs: base_name = os.path.basename(src) pos = base_name.rfind('.') if pos == -1: console.error('invalid thrift file name %s' % src) error += 1 file_suffix = base_name[pos + 1:] if file_suffix != 'thrift': console.error('invalid thrift file name %s' % src) error += 1 if error > 0: console.error_exit('invalid thrift file names found.')
def display_info():
    """Continuously redraw one status line per worker process until Ctrl-C.

    Polls each worker's queue without blocking, keeping the last known
    status when no update is available; sleeps args.interval between
    redraws.
    """
    global procs, queue, status
    while True:
        con.clear()
        try:
            for index, (process_id, process, queue) in enumerate(procs):
                try:
                    # Non-blocking read: keep the previous status on Empty.
                    status[index] = queue.get_nowait()
                except Empty:
                    pass
                print('\r', status[index])
        except KeyboardInterrupt as e:
            # Tear down workers before leaving the display loop.
            # NOTE(review): this unpacks 2-tuples while the loop above
            # unpacks 3-tuples from procs -- one of the two looks stale.
            for process, queue in procs:
                process.terminate()
                process.join()
            break
        except Exception as e:
            con.error(e)
        else:
            time.sleep(args.interval)
def run(self, args):
    """Dispatch the parsed command-line *args*.

    Optionally switches the work dir to args.target, runs build/test per
    args.cmd (defaulting to build), then kicks off scons.
    """
    logging.debug('args: %s' % args)
    os.chdir(self.root_dir)
    # Idiom fix: compare against None with `is`, not `==`/`!=`.
    if args.target is not None:
        work_dir = os.path.join(self.work_dir, args.target)
        self.set_work_dir(work_dir)
        logging.debug(self.get_work_dir())
    if args.cmd is None or args.cmd == 'build' or args.cmd == 'b':
        self.build()
    elif args.cmd == 'test' or args.cmd == 't':
        self.test()
    else:
        console.error('Invalid command: %s' % args.cmd)
    console.succ('Init done')
    self.exec_scons()
def verify(args: argparse.Namespace):
    """Validate the parsed CLI arguments (server count, ports, file).

    Returns *args* unchanged when every check passes; prints FAILED and
    exits with status 1 on the first failing assertion.
    """
    con.pretty_printing = args.colors
    try:
        # Verify number of ports equal the number of servers
        print("Validating server count ... ", end="")
        assert args.number == len(args.ports)
        con.success("OK")
        print("Validating server ports ... ")
        for port in args.ports:
            print("\t{} ... ".format(port), end="")
            # NOTE(review): assumes check_sock is truthy when the port is
            # usable on the local address -- confirm in the network module.
            assert network.check_sock(network.get_local_ip(), port)
            con.success("OK")
        # Verify a valid file was passed
        print("Validating file ... ", end="")
        assert pathlib.Path(args.file).is_file()
        con.success("OK")
        return args
    except AssertionError:
        con.error("FAILED")
        quit(1)
def _show_tests_summary(self, scheduler):
    """Show tests summary.

    Prints run/failed/passed counts from the scheduler and resets the md5
    stamp of each failed test so it reruns next time (Python 2 code).
    """
    run_tests = scheduler.num_of_run_tests
    console.info('{0} Testing Summary {0}'.format(self.title))
    self._show_skipped_tests_summary()
    console.info('Run %d tests' % run_tests)
    failed_targets = scheduler.failed_targets
    if failed_targets:
        console.error('%d tests failed:' % len(failed_targets))
        for target in failed_targets:
            print >>sys.stderr, '%s, exit code: %s' % (
                target.fullname, target.data['test_exit_code'])
            test_file_name = os.path.abspath(self._executable(target))
            # Do not skip failed test by default
            if test_file_name in self.test_stamp['md5']:
                self.test_stamp['md5'][test_file_name] = (0, 0)
        console.info('%d tests passed.' % (run_tests - len(failed_targets)))
    else:
        console.info('All tests passed!')
def _process_command(self, job_queue, redirect):
    """process routine.

    Each test is a tuple (target, run_dir, env, cmd)

    Drains *job_queue*, running each test (optionally with redirected
    output), recording failures, per-test results/costs and the run
    count under their respective locks.  Returns True when the queue is
    drained.
    """
    while not job_queue.empty():
        job = job_queue.get()
        target = job[0]
        target_key = "%s:%s" % (target['path'], target['name'])
        start_time = time.time()
        try:
            if redirect:
                returncode = self._run_job_redirect(job)
            else:
                returncode = self._run_job(job)
        except OSError as e:
            # Typically the test executable is missing or not runnable.
            console.error("%s: Create test process error: %s" % (
                target_key, str(e)))
            returncode = 255
        costtime = time.time() - start_time
        if returncode:
            target["test_exit_code"] = returncode
            # The failed-target list is shared across worker threads.
            self.failed_targets_lock.acquire()
            self.failed_targets.append(target)
            self.failed_targets_lock.release()
        self.tests_run_map_lock.acquire()
        run_item_map = self.tests_run_map.get(target_key, {})
        if run_item_map:
            run_item_map['result'] = self.__get_result(returncode)
            run_item_map['costtime'] = costtime
        self.tests_run_map_lock.release()
        self.num_of_run_tests_lock.acquire()
        self.num_of_run_tests += 1
        self.num_of_run_tests_lock.release()
    return True
def _start(self):
    """Serve file-transfer requests on self.port forever.

    Accepts one connection per loop iteration, decodes the request type,
    answers CHECKSUM / FILE_NAME / FILE_SIZE / TRANSFER (byte-range)
    queries about self.src, and logs any socket/protocol error.
    """
    with network.create_server_connection(network.get_local_ip(), self.port) as soc:
        while True:
            self.request = None
            try:
                soc.listen()
                if soc:
                    c_soc, _ = soc.accept()
                    # First decoded element is the request type, the rest
                    # are request-specific parameters.
                    request, *params = network.decode_parameter(
                        notnone(network.get_request(c_soc)))
                    self.request = Request(request)
                    self.update()
                    if self.request == Request.CHECKSUM:
                        network.send_request(
                            c_soc,
                            network.encode_parameter(
                                file.gen_checksum(self.src)))
                    elif self.request == Request.FILE_NAME:
                        network.send_request(
                            c_soc,
                            network.encode_parameter(
                                file.get_file_name(self.src)))
                    elif self.request == Request.FILE_SIZE:
                        network.send_request(
                            c_soc,
                            network.encode_parameter(
                                str(file.get_size(self.src))))
                    elif self.request == Request.TRANSFER:
                        # NOTE(review): this copy of params looks redundant
                        # (params is already a list) -- confirm.
                        params = [p for p in params]
                        start, end = int(params[0]), int(params[1])
                        # Stream the requested [start, end) byte range.
                        with open(self.src, 'rb') as f:
                            f.seek(start)
                            data = f.read(end - start)
                        network.send_request(c_soc, data)
                    c_soc.close()
            except (OSError, AssertionError) as e:
                con.error("Error occurred: {}".format(e))
                with open('log_server.log', 'a+') as f:
                    f.write('[{}] ERROR {}'.format(datetime.datetime.now(), e))
def exec_scons(self):
    """Run scons in the current directory and report the outcome."""
    # TODO: take the job count / keep-going flag from parsed options.
    scons_options = ' --duplicate=soft-copy --cache-show -Q '
    scons_options += ' -j 8 '
    logging.debug('scons_options: %s' % scons_options)
    logging.debug('cwd: %s' % os.getcwd())
    proc = subprocess.Popen("scons %s" % scons_options, shell=True)
    try:
        proc.wait()
        if proc.returncode:
            console.error("Building failed")
            return proc.returncode
    except:  # KeyboardInterrupt
        return 1
    console.succ("Building done")
    return 0
def _check_code_style(opened_files):
    """Run cpplint over *opened_files* and warn on style problems.

    Returns 0 when there is nothing to check or cpplint is unconfigured,
    1 on KeyboardInterrupt; otherwise falls off the end (returns None)
    even when warnings were reported (Python 2 code).
    """
    if not opened_files:
        return 0
    cpplint = configparse.blade_config.configs['cc_config']['cpplint']
    if cpplint == None or cpplint == "":
        return 0
    console.info("Begin to check code style for changed source code")
    p = subprocess.Popen(("%s %s" % (cpplint, ' '.join(opened_files))), shell=True)
    try:
        p.wait()
        if p.returncode:
            if p.returncode == 127:
                # Shell convention: exit 127 means "command not found".
                msg = "Can't execute '%s' to check style, you can config the 'cpplint' option to be a valid cpplint path in the 'cc_config' section of blade.conf or BLADE_ROOT, or make sure '%s' command is correct." % (
                    cpplint, cpplint)
            else:
                msg = "Please fixing style warnings before submitting the code!"
            console.warning(msg)
    except KeyboardInterrupt, e:
        console.error(str(e))
        return 1
def load_static(path):
    """Read the file at *path* into an HTTP Response.

    Picks the content type from the file extension (falling back to
    PLAIN); unreadable paths produce a NOT_FOUND response with a
    'Failure' body.
    """
    response = Response()
    path = os.path.abspath(path)
    try:
        byte_count = os.stat(path).st_size
        with open(path, 'rb') as handle:
            response.data = handle.read(byte_count)
        response.status = HTTPResponseCodes.OK
        # Match the extension against known content types; the last match
        # wins, PLAIN is the fallback.
        response.content_type = None
        lowered = path.lower()
        for candidate in HTTPContentTypes:
            if lowered.endswith(candidate.name.lower()):
                response.content_type = candidate
        if response.content_type is None:
            response.content_type = HTTPContentTypes.PLAIN
    except (OSError, FileNotFoundError):
        console.error('Unable to read the requested file at {}'.format(path))
        response.status = HTTPResponseCodes.NOT_FOUND
        response.content_type = HTTPContentTypes.PLAIN
        response.data = 'Failure'.encode()
    return response
def _show_tests_summary(self, scheduler):
    """Show tests summary.

    Prints run/failed/passed counts and resets the md5 stamp of every
    failed test so it is rerun next time (Python 2 code).
    """
    run_tests = scheduler.num_of_run_tests
    console.info('{0} Testing Summary {0}'.format(self.title))
    self._show_skipped_tests_summary()
    console.info('Run %d tests' % run_tests)
    failed_targets = scheduler.failed_targets
    if failed_targets:
        console.error('%d tests failed:' % len(failed_targets))
        for target in failed_targets:
            print >> sys.stderr, '%s, exit code: %s' % (
                target.fullname, target.data['test_exit_code'])
            test_file_name = os.path.abspath(self._executable(target))
            # Do not skip failed test by default
            if test_file_name in self.test_stamp['md5']:
                self.test_stamp['md5'][test_file_name] = (0, 0)
        console.info('%d tests passed.' % (run_tests - len(failed_targets)))
    else:
        console.info('All tests passed!')
def _wait_worker_threads(self, threads):
    """Wait for worker threads to complete.

    Polls once a second: live threads get a timeout check (when a global
    test timeout is configured), finished threads are dropped from the
    list.  On Ctrl-C all workers are terminated and the interrupt is
    re-raised.
    """
    test_timeout = config.get_item('global_config', 'test_timeout')
    try:
        while threads:
            time.sleep(1)  # Check every second
            now = time.time()
            dead_threads = []
            for t in threads:
                # Fix: Thread.isAlive() was removed in Python 3.9; the
                # canonical spelling is is_alive().
                if t.is_alive():
                    if test_timeout is not None:
                        t.check_job_timeout(now)
                else:
                    dead_threads.append(t)
            for dt in dead_threads:
                threads.remove(dt)
    except KeyboardInterrupt:
        console.error('KeyboardInterrupt: Terminate workers...')
        for t in threads:
            t.terminate()
        raise
def _show_tests_summary(self, passed_run_results, failed_run_results):
    """Show tests summary. """
    self._show_banner('Testing Summary')
    scheduled = len(self.test_jobs)
    passed = len(passed_run_results)
    failed = len(failed_run_results)
    console.info('%d tests scheduled to run by scheduler.' % (scheduled))
    if self.skipped_tests:
        console.info('%d tests skipped when doing incremental test.' % len(self.skipped_tests))
        console.info('You can specify --full-test to run all tests.')
    executed = passed + failed
    if passed == scheduled:
        console.info('All tests passed!')
        return
    if failed_run_results:
        console.info('%d tests passed.' % passed)
        console.error('%d tests failed.' % failed)
    # Anything scheduled but neither passed nor failed was interrupted.
    cancelled = scheduled - executed
    if cancelled:
        console.error('%d tests cancelled by Ctrl-C' % cancelled)
def main():
    """Command-line entry point: train the post-processor, or denoise the
    given files with previously saved weights."""
    ap = argparse.ArgumentParser()
    ap.add_argument("--train", default=None, type=str, help="path containing training data")
    ap.add_argument("--valid", default=None, type=str, help="path containing validation data")
    ap.add_argument("--epochs", default=10, type=int, help="number of epochs to train")
    ap.add_argument("--name", default=None, type=str, help="name of experiment")
    ap.add_argument(
        "--weights", default="weights.h5", type=str, help="h5 file to read/write weights to"
    )
    ap.add_argument("--batch_size", default=16, type=int, help="batch size for training")
    ap.add_argument(
        "--load", action="store_true", help="Load previous weights file before starting"
    )
    ap.add_argument("--test", default=None, type=str, help="Test file to infer on every epoch")
    ap.add_argument("files", nargs="*", default=[])
    args = ap.parse_args()

    post_processor = PostProcessor(args.name)
    if not args.files and args.train:
        # Training mode: no input files given, a training path was.
        console.h1("preparing to train on {}".format(args.train))
        if args.load:
            console.log("loading weights from {}".format(args.weights))
            post_processor.load_weights(args.weights)
        console.log("loading data")
        train = DataGenerator(args.train, args.batch_size)
        valid = DataGenerator(args.valid, args.batch_size) if args.valid else None
        console.h1("training")
        post_processor.train(train, args.epochs, validation_data=valid, test_file=args.test)
        post_processor.save_weights(args.weights)
    elif args.files:
        # Inference mode: denoise each supplied file with saved weights.
        console.h1("preparing to process", args.files, "...")
        post_processor.load_weights(args.weights)
        for path in args.files:
            post_processor.denoise_from_file(path)
    else:
        console.error("please provide data to train on (--train) or files to process")
def find_catkin(base_path, underlays_list=None):
    '''
    Search the underlays looking for catkin's toplevel.cmake and python
    module.

    Returns a (catkin_toplevel, catkin_python_path, catkin_cmake_path)
    tuple; each entry stays None if not found.
    '''
    if underlays_list is None:
        underlays_list = config_cache.get_underlays_list_from_config_cmake(
            base_path)
    catkin_toplevel = None
    catkin_python_path = None
    catkin_cmake_path = None
    for underlay in underlays_list:
        cmake_dir = os.path.join(underlay, 'share', 'catkin', 'cmake')
        toplevel = os.path.join(cmake_dir, 'toplevel.cmake')
        py_dest = os.path.join(
            underlay, python_setup.get_global_python_destination())
        if os.path.isfile(toplevel):
            # it is in an underlay's install space
            catkin_cmake_path = cmake_dir
            catkin_toplevel = toplevel
            if os.path.isfile(os.path.join(py_dest, 'catkin', 'builder.py')):
                catkin_python_path = py_dest
                break
        elif os.path.isfile(os.path.join(py_dest, 'catkin', '__init__.py')):
            # it's probably a devel space
            console.error(
                'Error: catkin seems to be buried in a chained devel space - not yet supporting this'
            )
            break
    return catkin_toplevel, catkin_python_path, catkin_cmake_path
def _check_code_style(targets):
    """Run cpplint over the source files changed by `targets`.

    Returns 0 when the check is skipped (cpplint disabled or no changed
    files), 1 when interrupted by Ctrl-C, and None otherwise; lint
    warnings only produce a console warning, never a failure status.

    NOTE(review): Python 2 `except ..., e` syntax.
    """
    cpplint = config.get_item('cc_config', 'cpplint')
    if not cpplint:
        console.info('cpplint disabled')
        return 0
    changed_files = _get_changed_files(targets, _BLADE_ROOT_DIR, _WORKING_DIR)
    if not changed_files:
        return 0
    console.info('Begin to check code style for changed source code')
    # NOTE(review): shell=True with space-joined file names -- paths
    # containing spaces or shell metacharacters would be mis-handled;
    # consider passing an argv list with shell=False.
    p = subprocess.Popen(('%s %s' % (cpplint, ' '.join(changed_files))),
                         shell=True)
    try:
        p.wait()
        if p.returncode != 0:
            if p.returncode == 127:
                # 127 is the shell's "command not found" exit status.
                msg = ("Can't execute '{0}' to check style, you can config the "
                       "'cpplint' option to be a valid cpplint path in the "
                       "'cc_config' section of blade.conf or BLADE_ROOT, or "
                       "make sure '{0}' command is correct.").format(cpplint)
            else:
                msg = 'Please fixing style warnings before submitting the code!'
            console.warning(msg)
    except KeyboardInterrupt, e:
        console.error(str(e))
        return 1
def appendToJsonFile(path, category, new):
    """Append `new` to the list stored under `category` in the JSON file
    at `path`, then write the file back.

    The file must contain a JSON object whose `category` key maps to a
    list.  All failures are reported via `console` instead of raised.

    Fixes over the previous version:
    - if loading failed, `content` was undefined and the rewrite pass
      raised a NameError (masked as "Could not save"); now we stop early
      and never clobber the file with bad data,
    - `category.replace("s", "")` removed *every* 's' from the word
      (e.g. "processes" -> "procee"); only one trailing 's' is stripped,
    - the file was rewritten even when the append failed; now it is not.
    """
    # Singular form of the category name, used only in console messages.
    singular = category[:-1] if category.endswith("s") else category
    try:
        with open(path, "r") as f:
            content = json.load(f)
    except Exception:
        console.error("Could not load " + category)
        return
    try:
        content[category].append(new)
    except Exception:
        console.error("Could not append new " + singular)
        return
    try:
        with open(path, "w") as f:
            f.write(json.dumps(content))
        console.info("New " + singular + " added")
    except Exception:
        console.error("Could not save to " + path)
# NOTE(review): fragment of an enclosing function whose `def` line is not
# visible here (`return` statements and the parameters 物品名称或ID / 数量
# come from that unseen scope).  IronPython / Unity interop code
# (`clr.Reference`, .NET `str.Contains`), Python 2 `except ..., e` syntax.
# Runtime strings are user-facing Chinese text and must stay unchanged.

# Look up the item id by (partial) name match in the configured item list.
for item in config.ItemList[itemType]:
    if item.name.Contains(物品名称或ID):
        id = item.id
        break
# Parse the requested count; reject non-numeric input with a user message.
try:
    count = int(数量)
except StandardError, e:
    console.error(
        "输入参数 <color=#EE7AE9>%s</color> 不是数字,请输入数字,例如 <color=#EE7AE9>100</color>" % 数量)
    return
# Build the item payload; failure presumably means the id is not in the
# database -- TODO confirm what ItemData raises.
try:
    itemInfo = ItemData(id, count, True)
except:
    console.error("物品添加失败,数据库中不存在该物品")
    return
# Pass a .NET by-ref wrapper of the item into the bag API.
item = clr.Reference[ItemData](itemInfo)
bag = Config.BagInfoScript
if id != 0 and count > 0 and BaseBag.AddNewItem(
        bag.id, item, bag.MaxBagCount, bag.buttonPath):
    console.log(
        "成功添加 <color=yellow>%d</color> 个 <color=yellow>%s</color>" %
        (itemInfo.count, Config.GetLangText(ConfigItem.s_table[id].Name)))
else:
    console.error("物品添加失败,ID或物品数量错误")


# NOTE(review): the body of this command runs past the visible chunk.
def 命令_杀死周围生物(*可选_排除中立):
import console

# Emit the same notice at every console severity, in the original
# log / warn / error / info order.
_MESSAGE = "Does not run on IDLE."
for _emit in (console.log, console.warn, console.error, console.info):
    _emit(_MESSAGE)