def assemble(self, preset, runes) -> Build:
    print('building {} with strategy {}'.format(preset.monster.name, self))
    strategy_runes = {
        slot: [r for r in runes[slot] if self.is_suitable(r)]
        for slot in range(1, 7)
    }
    best_build = Build(preset)
    if self.can_build(strategy_runes):
        futures = []
        for build_order in directions(strategy_runes):
            futures.append(
                thread_pool.submit(assemble, runes=strategy_runes,
                                   slots=build_order, build=Build(preset)))
        for next_build in as_completed(futures):
            best_build = pick_best(best_build, next_build.result())
        return best_build
    else:
        print("can't build this monster using {} strategy".format(self))
        return best_build
def build(builds, original_text, settings, incremental, fmt, files=None):
    """
    Start a build for this corpus. If it is already running, join it.
    Messages from the build are received on a queue.
    """
    if not files:
        build = Build(original_text, settings)
    else:
        build = Build(original_text, settings, files=files)

    # Start build or listen to existing build
    if build.build_hash not in builds:
        builds[build.build_hash] = build
        build.make_files()
        t = Thread(target=Build.run, args=[build, fmt])
        t.start()
    # elif builds[build.build_hash].status == (Status.Error or Status.ParseError):
    #     log.info("Erroneous build found! Retrying...")
    #     t = Thread(target=Build.run, args=[build, fmt])
    #     t.start()
    else:
        build = builds[build.build_hash]
        log.info(
            "Joining existing build (%s) which started at %s"
            % (build.build_hash, pretty_epoch_time(build.status_change_time)))

    if files:
        return join_build(build, incremental, fileupload=True)
    else:
        return join_build(build, incremental)
def GET(self, type, number):
    number_int = int(number)
    max = len(tests_model[type])
    if number_int > max:
        return "Wrong test number"
    name = type
    test = 'test' + number + '.js'
    model = tests_model[type][number_int - 1]
    title = model['Title']
    extras = model['Extras'][:]
    if 'Build' in model:
        build_config = model['Build']
    else:
        build_config = [type]
    build = Build().build(build_config)
    includes = {
        'left': getattr(render['TestCases'],
                        type + '/' + 'left')(model, type, number_int, max),
        'right': getattr(render['TestCases'],
                         type + '/' + 'test' + number)(model),
    }
    return render['TestCases'].basetests(name, title, extras, test, build,
                                         includes)
def test_parse_remainder(test_case):
    b = Build()
    b.args = Namespace()
    b._parse_remainder(test_case['r'])
    assert b.args.source_dir == test_case['s']
    assert b.args.cmake_opts == test_case['c']
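# A minimal sketch of the parametrized case the test above consumes; the keys
# 'r', 's', and 'c' mirror the assertions, but these concrete values are
# hypothetical rather than taken from the real fixtures.
EXAMPLE_TEST_CASE = {
    'r': ['path/to/source', '-DCMAKE_BUILD_TYPE=Release'],  # remainder argv
    's': 'path/to/source',                                  # expected source_dir
    'c': '-DCMAKE_BUILD_TYPE=Release',                      # expected cmake_opts
}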
def _find_build(self, preset):
    suitable_runes = [
        rune for rune in self._runes
        if not rune.is_locked and rune.set in preset.allowed_sets
    ]
    runes_graph = {
        slot: [r for r in suitable_runes if r.slot == slot]
        for slot in range(1, 7)
    }
    best_build = Build(preset)
    all_strategies = find_strategies(preset)
    procs_per_strategy = sorted(
        {s.procs_required for s in all_strategies})[:2]
    build_strategies = [
        s for s in all_strategies if s.procs_required in procs_per_strategy
    ]
    for strategy in build_strategies:
        try:
            best_build = pick_best(best_build,
                                   strategy.assemble(preset, runes_graph))
        except Exception as err:
            print('err', err)
            traceback.print_exc()
    best_build.lock()
    return best_build
def handler(build_event, context):
    """Process build events.

    If the build event is for the CodeBuild project this app is managing
    and it's specifically triggered by a PR, copy the build logs to the
    app S3 bucket and post a link to the logs as a comment on the GitHub PR.
    """
    LOG.debug('Received event: %s', build_event)
    build = Build(build_event)
    if not build.is_pr_build():
        LOG.debug('Not a PR build')
        return
    LOG.info(
        'Copying build logs for PR build: project=%s, pr_id=%s, build_logs_url=%s',
        build.project_name, build.get_pr_id(), build.get_logs_url())
    build.copy_logs()
    if config.DELETE_PREVIOUS_COMMENTS:
        GITHUB.delete_previous_comments(build)
    if build.status == 'SUCCEEDED' and not config.COMMENT_ON_SUCCESS:
        LOG.debug('Not publishing comment because build SUCCEEDED '
                  'but COMMENT_ON_SUCCESS is set to false.')
    else:
        GITHUB.publish_pr_comment(build)
def summary(builds, newerthan, cache):
    if os.path.exists(cache):
        with open(cache, 'rb') as f:
            buildobjs = pickle.load(f)
    else:
        buildobjs = {}
    for build in builds:
        path_groups_match = re.search(
            ('^(?P<build_folder>.*/(?P<job_name>[^/]+)/'
             'builds/(?P<build_num>[0-9]+))/'), build)
        if path_groups_match:
            path_groups = path_groups_match.groupdict()
            if path_groups['build_num'] in buildobjs:
                continue
            buildobjs[path_groups['build_num']] = Build(
                build_folder=path_groups['build_folder'],
                job_name=path_groups['job_name'],
                build_num=path_groups['build_num'])
    print_html(buildobjs)
    # Pickle build objs newer than RETENTION_DAYS to the cache file, so those
    # logs don't need to be reprocessed on the next run.
    age_limit = (datetime.datetime.now() -
                 datetime.timedelta(days=RETENTION_DAYS))
    cache_dict = {}
    for num, build in buildobjs.items():
        if build.timestamp > age_limit:
            cache_dict[num] = build
    with open(cache, 'wb') as f:
        pickle.dump(cache_dict, f, pickle.HIGHEST_PROTOCOL)
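# Sanity-check sketch of the build-path regex used above, assuming a
# Jenkins-style directory layout; the sample path is hypothetical.
m = re.search(('^(?P<build_folder>.*/(?P<job_name>[^/]+)/'
               'builds/(?P<build_num>[0-9]+))/'),
              '/var/lib/jenkins/jobs/my-job/builds/42/build.xml')
assert m.groupdict() == {
    'build_folder': '/var/lib/jenkins/jobs/my-job/builds/42',
    'job_name': 'my-job',
    'build_num': '42',
}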
def resume_worker():
    for d in get_build_directories(Config.builds_dir):
        log.info("Reattaching build in directory %s", d)
        build = builds[d] = Build(None, None, init_from_hash=d, resuming=True)
        Build.run(build, 'xml')
def main(argv):
    try:
        b = Build().build()
        print(b)
    except Exception as e:
        print('error! ', e)
        R.exit()
def build():
    "Build your documentation"
    docs_path = Path("docs")
    config = Config.parse_yaml(docs_path / "pagepeach.yaml")
    build = Build(config)
    build.build(docs_path)
def main():
    rtree = Build()
    if rtree.is_event_db():
        print 'Already present'
        rtree.remove_events()
    rtree.add_events()
    events = rtree.get_event_locations()
    rtree.add_to_rtree(events, 'event')
def upload():
    file = request.files['build']
    if file and allowed_file(file.filename):
        filename = secure_filename(file.filename)
        file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
        bObj = Build()
        bObj.build(app.config["UPLOAD_FOLDER"], app.config['UNZIP_FOLDER'],
                   app.config["FILE_NAME"])
        return render_template("success.html")
def get_build(latest_build_id, latest_build):
    build_matrix = latest_build['build_info']['matrix']
    jobs = list(build_matrix)
    filtered_jobs = filter_jobs(jobs)
    if len(filtered_jobs) == 0:
        print("We have no jobs after filtering")
        exit(0)
    return Build(latest_build_id, filtered_jobs)
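# A hedged sketch of the 'latest_build' payload shape that get_build() reads;
# only the path it actually dereferences is shown, and the job entries are
# hypothetical.
example_latest_build = {
    'build_info': {
        'matrix': [
            {'id': 1, 'state': 'failed'},
            {'id': 2, 'state': 'passed'},
        ],
    },
}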
def test_build_value_is_not_0_when_rune_equipped(self):
    build = Build(self._preset)
    rune = make_rune(slot=1, set='Violent', grade=6, lvl=12,
                     primary=('atk', 118),
                     sub_stats=(
                         ('spd', 10),
                         ('acc', 5),
                         ('atk%', 10),
                         ('hp%', 10),
                     ))
    self.assertEqual(0.54, round(build.equip(rune).value, 3))
def summary(builds, newerthan, cache):
    buildobjs = {}
    if os.path.exists(cache):
        try:
            with open(cache, 'rb') as f:
                buildobjs = pickle.load(f)
        except Exception as e:
            buildobjs = {}
            sys.stderr.write(
                "Failed to read cache file: {cache}\n".format(cache=cache))
            traceback.print_exc(file=sys.stderr)
    for build in builds:
        path_groups_match = re.search(
            ('^(?P<build_folder>.*/(?P<job_name>[^/]+)/'
             'builds/(?P<build_num>[0-9]+))/'), build)
        if path_groups_match:
            path_groups = path_groups_match.groupdict()
            job_name = path_groups['job_name']
            build_num = path_groups['build_num']
            key = "{job_name}_{build_num}".format(job_name=job_name,
                                                  build_num=build_num)
            if key in buildobjs:
                continue
            try:
                buildobjs[key] = Build(
                    build_folder=path_groups['build_folder'],
                    job_name=path_groups['job_name'],
                    build_num=path_groups['build_num'])
                sys.stderr.write("OK: {key}\n".format(key=key))
            except Exception as e:
                sys.stderr.write("FAIL: {key} {e}\n".format(key=key, e=e))
                traceback.print_exc(file=sys.stderr)
    print_html(buildobjs)
    # Pickle build objs newer than RETENTION_DAYS to the cache file, so those
    # logs don't need to be reprocessed on the next run.
    age_limit = (datetime.datetime.now() -
                 datetime.timedelta(days=RETENTION_DAYS))
    cache_dict = {}
    for key, build in buildobjs.items():
        if build.timestamp > age_limit:
            cache_dict[key] = build
    with open(cache, 'wb') as f:
        pickle.dump(cache_dict, f, pickle.HIGHEST_PROTOCOL)
def problem_add_page():
    form = ProblemAddForm()
    if form.validate_on_submit():
        form_title = request.form["title"]
        form_description = request.form["description"]
        form_build = request.form["build"]
        form_solution_r = request.form["solution_r"]
        form_privacy = request.form["privacy"]
        problem = Problem(form_title, form_description, form_privacy,
                          form_solution_r)
        build = Build(form_build)
        db = current_app.config["db"]
        problem_key = db.add_problem(problem, build, current_user.email)
        return redirect(url_for("problem_page", problem_key=problem_key))
    return render_template("problem_add.html", form=form)
def build_it(self):
    if not self.on_builder:
        self.handler = Build(self.handler.board)
        self.on_builder = True
    self.state_change("disabled")
    self.board_set = self.handler.go_through_queue()
    # favorite = self.handler.get_favorite(self.board_set)
    self.board_set_index = 0
    self.handler.board = self.board_set[0]
    self.update_index_entry()
    self.update_display()
    self.set_piece_marker = True
    self.state_change("normal")
    self.master.deiconify()
    self.master.focus_force()
def build(self, card):
    number_of_cards = int(input("how many cards do you want to use? :"))
    building = []
    while number_of_cards > 0:
        card_to_build = check_cards(
            input("with which card do you want to build?"), table.on_table)
        number_of_cards -= 1
        building.append(card_to_build)
        table.on_table.remove(card_to_build)
    # The played card leaves the hand once, after the cards it builds on
    # have been collected.
    self.hand.remove(card)
    o = Build(card, building)
    table.on_table.append(o)
    # NOTE: this check is a no-op: a Build instance never equals True, and
    # the result of the comparison "option != 'trail'" is discarded.
    if o == True:
        option != "trail"
def profile():
    if session.get("user_id") is not None:
        if request.method == 'POST':
            user = find_user(session.get('user_id'))
            build = Build('untitled build', 'voxel(0, 0, 0)', user.id)
            db_session.add(build)
            db_session.commit()
            return redirect('/builder/' + str(build.id))
        user = find_user(session.get('user_id'))
        builds = find_user_builds(user.id)
        return render_template('profile.html', builds=builds)
    else:
        return redirect('/')
def PollAndUpdate(self):
    self.error_text = None
    try:
        response = requests.get("http://localhost:6119/game")
        game_info = json.loads(response.text)
        self.game_time = game_info['displayTime']
        for player in game_info['players']:
            name = player['name']
            if self.villain is not None and name == self.villain.name:
                break
            elif name != self.cfg.hero:
                self.villain = Villain(name, self.cfg.data_dir)
                self.build = Build(player['race'], self.cfg.data_dir)
                self.view.SetNotesText(self.villain.GetNotes())
                break
        else:
            self.error_text = 'No villain found.'
    except Exception as e:
        self.error_text = "Can't connect to API."
        print(e)
    self.UpdateView(self.view_format)
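# A sketch of the /game response shape PollAndUpdate() expects from the local
# StarCraft II client API on port 6119; only the fields the method reads are
# shown, and the values are illustrative.
example_game_info = {
    'displayTime': 123.5,
    'players': [
        {'name': 'HeroPlayer', 'race': 'Terr'},
        {'name': 'SomeVillain', 'race': 'Zerg'},
    ],
}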
def handler(build_event, context):
    """Process build events.

    If the build event is for the CodeBuild project this app is managing
    and it's specifically triggered by a PR, copy the build logs to the
    app S3 bucket and post a link to the logs as a comment on the GitHub PR.
    """
    LOG.debug('Received event: %s', build_event)
    build = Build(build_event)
    if build.project_name != config.PROJECT_NAME:
        LOG.debug('Not our codebuild project')
        return
    if not build.is_pr_build():
        LOG.debug('Not a PR build')
        return
    LOG.info(
        'Copying build logs for PR build: project=%s, pr_id=%s, build_logs_url=%s',
        build.project_name, build.get_pr_id(), build.get_logs_url())
    build.copy_logs()
    GITHUB.publish_pr_comment(build)
def make_build(fancy=False):
    system('rm -rf Jit/*')
    print "Building Examples..."
    make_examples(fancy)
    system('cp -r Examples Jit/')
    print "Done. Building Extras..."
    system('mkdir Jit/Extras && cp Extras/excanvas.js Jit/Extras/excanvas.js')
    print "Done. Building Library..."
    lib = Build().build()
    license = open('LICENSE', 'r').read()
    f = open('Jit/jit.js', 'w')
    f.write(license)
    f.write(lib)
    f.close()
    print "Done. Compressing Library..."
    f = open('Jit/jit-yc.js', 'w')
    f.write(license)
    f.close()
    system('java -jar Extras/' + YC + ' Jit/jit.js >> Jit/jit-yc.js')
    print "Done. Zipping..."
    system('rm Jit.zip')
    system('zip -r Jit.zip Jit/')
    print "Done, I guess."
def get_running_builds(data):
    """
    Return build data for currently running builds.

    :Args:
        data (dict): the return value of self.get_json()

    :Raises:
        KeyError if there is no 'builds' key in the data

    :Returns:
        dict: where keys are a PR id and the value is a list of 'builds'
        for the PR. Each item in the list has the data for the build as
        returned in self.get_json.
    """
    build_data = defaultdict(list)
    for b in data['builds']:
        build = Build(b)
        if build.isbuilding:
            build_data[build.pr_id].append(b)
    return build_data
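# Usage sketch for get_running_builds() with a minimal hypothetical payload;
# Build is assumed to derive 'isbuilding' and 'pr_id' from each build dict.
sample_data = {
    'builds': [
        {'number': 101, 'building': True},
        {'number': 102, 'building': False},
    ],
}
running_by_pr = get_running_builds(sample_data)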
def stop_duplicates(self, build_data):
    """
    Finds PRs that have multiple builds actively running.

    :Args:
        build_data: the data returned by self.get_running_builds()
    """
    lines = []
    for pr, builds in build_data.iteritems():
        if self.one_per_author:
            # Assemble a dict where the key is the author and the
            # value is a list of that author's builds.
            pr_builds = defaultdict(list)
            for build in builds:
                author = Build(build).author
                pr_builds[author].append(build)
            # Now for each author, stop all but the most
            # recent build.
            for build_list in pr_builds.itervalues():
                output = self.stop_all_but_most_recent(pr, build_list)
                if len(output) > 0:
                    lines.extend(output)
        else:
            output = self.stop_all_but_most_recent(pr, builds)
            if len(output) > 0:
                lines.extend(output)
    if len(lines) > 0:
        out = ("\n---------------------------------"
               "\n** Extra running builds found. **"
               "\n---------------------------------\n")
        out += "\n".join(lines)
        logger.info(out)
    else:
        logger.info("No extra running builds found.")
def _setupUI(self):
    svnGroup = QtGui.QGroupBox("SVN Status")
    self.svn = SVN()
    svnLayout = QtGui.QHBoxLayout(svnGroup)
    svnLayout.addWidget(self.svn)

    buildGroup = QtGui.QGroupBox("Building")
    self.build = Build()
    buildLayout = QtGui.QHBoxLayout(buildGroup)
    buildLayout.addWidget(self.build)

    treeGroup = QtGui.QGroupBox("Available versions")
    self.tree = Tree()
    treeLayout = QtGui.QHBoxLayout(treeGroup)
    treeLayout.addWidget(self.tree)

    mainLayout = QtGui.QHBoxLayout()
    mainLayout.addWidget(treeGroup)
    mainLayout.addWidget(buildGroup)
    mainLayout.addWidget(svnGroup)

    centralWidget = QtGui.QWidget()
    centralWidget.setLayout(mainLayout)
    self.setCentralWidget(centralWidget)
def summary(jobsdir, newerthan, jsonfile):
    # Calculate age limit based on retention days; builds older than this
    # will be ignored whether they are found in the json file or jobsdir.
    age_limit = (datetime.datetime.now() -
                 datetime.timedelta(days=RETENTION_DAYS))

    data = dict(builds=[])
    # read data from json input file
    if os.path.exists(jsonfile):
        try:
            with open(jsonfile, 'r') as f:
                data = json.load(f)
        except Exception as e:
            sys.stderr.write("Failed to read json file: {jsonfile}\n".format(
                jsonfile=jsonfile))
            traceback.print_exc(file=sys.stderr)

    # Current production data.json has some extremely long failure detail
    # fields. This commit includes a change to failure.py to ensure
    # that doesn't happen in future. However to deal with the problem
    # on disk, we load and truncate the fields here.
    # At the end of this run, the data file will be rewritten with
    # truncated values, so this fix code will only be needed once.
    for b in data['builds']:
        for f in b['failures']:
            f['detail'] = f['detail'][:1000]

    # create set of build ids so we don't scan builds
    # we already have summary information about
    build_dict = {
        "{jn}_{bn}".format(jn=b['job_name'], bn=b['build_num']): b
        for b in data['builds']
    }

    # walk the supplied dir, scan new builds
    for count, build in enumerate([
            "{}/build.xml".format(root)
            for root, dirs, files in os.walk(jobsdir)
            if "build.xml" in files and ("PM_" in root or "PR_" in root)
    ]):
        path_groups_match = re.search(
            ('^(?P<build_folder>.*/(?P<job_name>[^/]+)/'
             'builds/(?P<build_num>[0-9]+))/'), build)
        if path_groups_match:
            path_groups = path_groups_match.groupdict()
            job_name = path_groups['job_name']
            build_num = path_groups['build_num']
            key = "{job_name}_{build_num}".format(job_name=job_name,
                                                  build_num=build_num)
            if key in build_dict:
                continue
            try:
                build = Build(build_folder=path_groups['build_folder'],
                              job_name=path_groups['job_name'],
                              build_num=path_groups['build_num'])
                if build.timestamp > age_limit:
                    if build.failed:
                        # store the log in memory only as long as necessary
                        build.log_lines = build.read_logs()
                        Failure.scan_build(build)
                        build.log_lines = []
                    if (count % 25 == 0):
                        gc.collect()
                    build_dict[key] = build
                    sys.stderr.write(".")
                    # sys.stderr.write("OK: {key}\n".format(key=key))
                else:
                    sys.stderr.write("_")
                    # sys.stderr.write("Old Build: {key}\n".format(key=key))
            except lxml.etree.XMLSyntaxError as e:
                sys.stderr.write("\nFAIL: {key} {e}\n".format(key=key, e=e))
            except Exception as e:
                sys.stderr.write("\nFAIL: {key} {e}\n".format(key=key, e=e))
                if ("can't parse internal" not in str(e)):
                    traceback.print_exc(file=sys.stderr)

    # dump data out to json file
    # remove builds older than RETENTION_DAYS
    # ensure we only dump data newer than RETENTION_DAYS
    with open(jsonfile, "w") as f:
        f.write(
            serialise(
                dict(builds=build_dict.values(),
                     timestamp=datetime.datetime.now(),
                     retention_days=RETENTION_DAYS)))
recipient_cc = Recipient(user_recipient_cc
                         if user_recipient_cc is not None
                         else default_recipient_cc).recipients()

if __name__ == '__main__':
    """ Packaging script """
    logger = logging.getLogger(__name__)
    coloredlogs.install(level='DEBUG', logger=logger)
    if len(Start.servers) == 0:
        logger.error('servers count is 0!')
        exit()
    # Clean the workspace directory
    Utils.remove_workspace()
    begin_time = time.time()
    date_time = time.strftime('%Y%m%d%H%M', time.localtime(begin_time))
    Code(version=Start.version).clone()
    Build(configuration=Start.configuration).pre_clean()
    for server in Start.servers:
        utils = Utils(server, Start.configuration)
        utils.copy_code()
        File(server, Start.configuration, Start.version).modify_all()
        Build(server, Start.configuration).run()
def main():
    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('-d', action="store", type=str, dest="view")
    parser.add_argument('-s', action="store", type=str, dest="server")
    parser.add_argument('-l', action="store", type=int, dest="zone_limit")
    parser.add_argument('-o', action="store", type=str, dest="origin")
    parser.add_argument('-p', action="store", type=str, dest="zone_path")
    parser.add_argument('-w', action="store", type=str, dest="override_path")
    parser.add_argument('-x', action="store_true", dest="delete_unknown_zone")
    parser.add_argument('-e', action="store", dest="named_restart_command")
    args = parser.parse_args()

    config_obj = Config()
    o_config = config_obj.get_config()
    stop_update_file_path = o_config.get('Global', 'StopUpdate')
    statefile_path = o_config.get('Global', 'StateFile')

    try:
        is_config_valid, config_message = config_obj.config_valid(o_config)
    except AttributeError:
        is_config_valid = False
        config_message = "No Configuration File Found"

    if os.path.exists(stop_update_file_path):
        print "Stop Update file exists"
        sys.exit(2)
    if is_config_valid is False:
        print config_message
        sys.exit(2)
    if not args.view:
        msg = "DNS view command line argument required"
        Common.write_stop_update(stop_update_file_path, msg)
        print msg
        sys.exit(2)
    if not args.server:
        msg = "server command line argument required"
        Common.write_stop_update(stop_update_file_path, msg)
        print msg
        sys.exit(2)
    if not args.zone_path:
        msg = "zone_path command line argument required"
        Common.write_stop_update(stop_update_file_path, msg)
        print msg
        sys.exit(2)

    named_restart_command = "service named restart"
    if args.named_restart_command:
        named_restart_command = args.named_restart_command

    override_path = None
    if args.override_path:
        override_path = args.override_path

    named_reload = False
    named_failures = None
    reload_zones = []
    api = API(o_config, args.view, origin=args.origin)
    all_records = []
    all_zones = api.build_all_zones()
    # Track all of the zone names to compare via os.listdir
    all_zone_names = []
    if args.zone_limit:
        all_zones = all_zones[:args.zone_limit]

    for zone in all_zones:
        is_reverse = Common.is_reverse_zone_name(zone)
        if is_reverse:
            zone = Common.reverse_name(zone)
        d_cmd = CMD(zone, args.server)
        output, errors = d_cmd.run()
        build = Build(output)
        all_records += build.run()
        local_zone = ZoneObject(zone, path=args.zone_path)
        local_serial = local_zone.get_serial()
        all_zone_names.append(local_zone.zone_name)
        try:
            axfr_zone = dnszone.from_xfr(query.xfr(args.server, zone))
        except dns.exception.FormError:
            axfr_zone = None
        except dns.name.EmptyLabel:
            axfr_zone = None
        except Exception, e:
            axfr_zone = None
        if axfr_zone is None or local_zone is None:
            continue
        if axfr_zone is not None:
            axfr_zone_object = ZoneObject(zone, zone_obj=axfr_zone)
            axfr_serial = axfr_zone_object.get_serial()
            if axfr_serial != local_serial:
                named_reload = True
                write_path = os.path.join(args.zone_path, zone)
                try:
                    axfr_zone.to_file(write_path)
                except Exception, e:
                    print "Exception: {0}".format(e)
                reload_zones.append({'zone': zone, 'path': write_path})
parser.add_argument("--train", help="trains model", action="store_true") parser.add_argument("--phase1", help="trains model", action="store_true") parser.add_argument("--phase2", help="trains model", action="store_true") parser.add_argument("--test", help="evaluates model", action="store_true") args = parser.parse_args() start = datetime.now() start_time = start.strftime("%H:%M:%S") print("Start Time =", start_time) ######################################### if args.train: b = Build('./datasets/42.csv') b.data = b.build_train_set(b.non_bot_tuples, b.bot_tuples) b.preprocess() print("Done pre-processing on Train set!") train_p1() train_p2() if args.phase1: # PRE-PROCESS THE TRAINING DATASET & UNSUPERVISED LEARNING b = Build('./datasets/42.csv') b.data = b.build_train_set(b.non_bot_tuples, b.bot_tuples) b.preprocess()
import shutil

from build import Build

if __name__ == "__main__":
    appBuilder = Build()
    appBuilder.build()
    shutil.make_archive(appBuilder.packageName, 'zip', appBuilder.packageName)