def get_resistances(build: Build, force_display=False):
    """
    Creates the resistance string.

    :param build: build we want to output
    :param force_display: override the display thresholds
    :return: string containing all resistances, or an empty string if nothing is noteworthy
    """
    output = "**Resistances**: "
    resistances = ['Fire', 'Cold', 'Lightning', 'Chaos']
    emojis = [':fire:', ':snowflake:', ':zap:', ':skull:']
    show = False
    for i, res in enumerate(resistances):
        res_val = build.get_stat(
            'Player', res + 'Resist',
            OutputThresholds.CHAOS_RES.value if res == 'Chaos' else OutputThresholds.ELE_RES.value)
        res_over_cap = build.get_stat('Player', res + 'ResistOverCap')
        if res_val:
            output += emojis[i] + " {:.0f}".format(res_val)
            show = True
            if res_over_cap and res_over_cap > 0:
                output += "(+{:.0f}) ".format(res_over_cap)
            output += " "
    output += "\n"
    return output if show else ""
def get_build(self):
    """ Pre-fill some build attributes based on GET parameters """
    build = Build(
        forum_id=self.cleaned_data['forum_id'],
        topic_id=self.cleaned_data['topic_id'],
        user_id=self.request.user.id,
        profile_id=self.request.user.get_profile().id,
        title=self.cleaned_data['title'],
    )

    # create url
    current_site = get_current_site(self.request)
    build.url = "http://%s%s" % (current_site.domain, build.topic_url)

    # Try to parse the scale
    pattern = re.compile(r'1[:/](\d{1,4})')
    match = re.search(pattern, build.title)
    if match:
        build.scale = match.group(1)

    # see if we can match a brand...
    bits = build.title.split()
    for bit in bits:
        brands = Brand.objects.filter(name__iexact=bit)
        if brands:
            build.brand = brands[0]

    return build
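# Minimal standalone sketch (not from the original code) of the scale-parsing regex used in
# get_build above; the title string is hypothetical and only shows what group(1) captures
# for both "1/48" and "1:48" style titles.
import re

_scale_pattern = re.compile(r'1[:/](\d{1,4})')
_match = re.search(_scale_pattern, "Tamiya Spitfire Mk.I 1/48")
assert _match and _match.group(1) == "48"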
def _create_build(branch, platform, builder, key_name='some-build'):
    build_key = Build(key_name=key_name, branch=branch, platform=platform, builder=builder,
                      buildNumber=1, revision=100, timestamp=datetime.now()).put()
    return Build.get(build_key)
def create_build(build_number, revision):
    timestamp = datetime.now().replace(microsecond=0)
    build = Build(branch=branch, platform=platform, builder=builder,
                  buildNumber=build_number, revision=revision, timestamp=timestamp)
    build.put()
    return build
def _create_results(self, branch, platform, builder, test_name, values, timestamps=None):
    results = []
    for i, value in enumerate(values):
        build = Build(branch=branch, platform=platform, builder=builder, buildNumber=i,
                      revision=100 + i, timestamp=timestamps[i] if timestamps else datetime.now())
        build.put()
        result = TestResult(name=test_name, build=build, value=value)
        result.put()
        results.append(result)
    return results
def _create_results(branch, platform, builder, test_name, values):
    results = []
    for i, value in enumerate(values):
        build = Build(branch=branch, platform=platform, builder=builder, buildNumber=i,
                      revision=100 + i, timestamp=datetime.now())
        build.put()
        result = TestResult(name=test_name, build=build, value=value)
        result.put()
        Test.update_or_insert(test_name, branch, platform)
        results.append(result)
    return results
def _create_results(self, branch, platform, builder, test_name, values, timestamps=None, starting_revision=100):
    builds = []
    results = []
    for i, value in enumerate(values):
        build = Build(branch=branch, platform=platform, builder=builder, buildNumber=i,
                      revision=starting_revision + i,
                      timestamp=timestamps[i] if timestamps else datetime.now())
        build.put()
        result = TestResult(name=test_name, build=build, value=value)
        result.put()
        builds.append(build)
        results.append(result)
    return builds, results
def get_charges(build: Build, charge_types=['Endurance', 'Frenzy', 'Power']):
    output = []
    for charge_type in charge_types:
        val = build.get_stat('Player', charge_type + 'Charges')
        max_val = build.get_stat('Player', charge_type + 'ChargesMax')
        charge_is_active = build.config.get('use' + charge_type + 'Charges')
        if charge_is_active and val and max_val:
            output.append('{}: {:.0f}/{:.0f}'.format(charge_type, val, max_val))
    return ', '.join(output) if output else None
def post(self):
    self.response.headers['Content-Type'] = 'text/plain; charset=utf-8'

    log_id = int(self.request.get('id', 0))
    log = ReportLog.get_by_id(log_id)
    if not log or not log.commit:
        self.response.out.write("Not processed")
        return

    branch = log.branch()
    platform = log.platform()
    build = Build.get_or_insert_from_log(log)

    for test_name, result_value in log.results().iteritems():
        test = Test.update_or_insert(test_name, branch, platform)
        result = TestResult.get_or_insert_from_parsed_json(test_name, build, result_value)

        runs = Runs.get_by_objects(branch, platform, test)
        regenerate_runs = True
        if runs:
            runs.update_incrementally(build, result)
            regenerate_runs = False
        schedule_runs_update(test.id, branch.id, platform.id, regenerate_runs)

    log = ReportLog.get(log.key())
    log.delete()

    # We need to update the dashboard and manifest because they are affected by the existence of test results
    schedule_dashboard_update()
    schedule_manifest_update()

    self.response.out.write('OK')
def lastclobber():
    "Get the max/last clobber time for a particular builddir and branch."

    session = g.db.session(DB_DECLARATIVE_BASE)
    now = int(time.time())
    branch = request.args.get('branch')
    slave = request.args.get('slave')
    builddir = request.args.get('builddir')
    buildername = request.args.get('buildername')

    # TODO: Move the builds update to a separate endpoint (requires client changes)
    build = Build.as_unique(
        session,
        branch=branch,
        builddir=builddir,
        buildername=buildername,
    )
    # Always force the time to update
    build.last_build_time = now
    session.add(build)
    session.commit()

    max_ct = session.query(ClobberTime).filter(
        ClobberTime.builddir == builddir,
        ClobberTime.branch == branch,
        # a NULL slave value signifies all slaves
        or_(ClobberTime.slave == slave, ClobberTime.slave == None)  # noqa
    ).order_by(desc(ClobberTime.lastclobber)).first()

    if max_ct:
        # The client parses this result by colon as:
        #   builddir, lastclobber, who = urllib2.open.split(':')
        # as such it's important for this to be plain text and have
        # no extra colons within the field values themselves
        return "{}:{}:{}\n".format(max_ct.builddir, max_ct.lastclobber, max_ct.who)
    return ""
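# Illustration (with made-up values) of the colon-delimited plain-text response produced by
# lastclobber above and how a client might split it, per the comment in the function.
_line = "mozilla-central-linux64:1474568400:someone@example.com\n"
_builddir, _lastclobber, _who = _line.strip().split(':')
assert _builddir == "mozilla-central-linux64" and _lastclobber == "1474568400"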
def execute():
    build = Build.get_by_key_name(key_name)
    if build:
        return self._output('The build at %s already exists for %s' % (str(timestamp), builder.name))
    return Build(branch=branch, platform=platform, builder=builder, buildNumber=buildNumber,
                 timestamp=timestamp, revision=revision, key_name=key_name).put()
def test_get_or_insert_from_log(self):
    branch, platform, builder = _create_some_builder()

    timestamp = datetime.now().replace(microsecond=0)
    log = ReportLog(timestamp=timestamp, headers='some headers',
                    payload='{"branch": "some-branch", "platform": "some-platform", "builder-name": "some-builder",' +
                            '"build-number": 123, "webkit-revision": 456, "timestamp": %d}' % int(mktime(timestamp.timetuple())))

    self.assertThereIsNoInstanceOf(Build)

    build = Build.get_or_insert_from_log(log)
    self.assertTrue(build)
    self.assertEqual(build.branch.key(), branch.key())
    self.assertEqual(build.platform.key(), platform.key())
    self.assertEqual(build.builder.key(), builder.key())
    self.assertEqual(build.buildNumber, 123)
    self.assertEqual(build.revision, 456)
    self.assertEqual(build.chromiumRevision, None)
    self.assertEqual(build.timestamp, timestamp)
    self.assertOnlyInstance(build)
def add_build(request, p_id):
    project = Project.objects.get(id=p_id)
    if request.POST:
        version = request.POST['version']
        description = request.POST['description']
        new_build = Build(version=version, description=description, project=project)
        new_build.save()
        return HttpResponseRedirect('/project/get/' + str(project.id) + '/')
    else:
        raise PermissionDenied
def generate_response(author, build: Build, minified=False):
    """
    Build an embed to respond to the user.

    :param author: name of the person triggering the action
    :param build: build to parse an embed from
    :param minified: (bool) whether to get a minified version or the full one
    :return: filled embed for discord
    """
    embed = create_embed(author, build.level, build.ascendency_name, build.class_name,
                         build.get_active_skill())

    # add new fields
    def_str = defense_output.get_defense_string(build)
    if def_str:
        embed.add_field(name="Defense", value=def_str, inline=minified)

    offense = offense_output.get_offense(build)
    if offense:
        embed.add_field(name="Offense", value=offense, inline=minified)

    charges_str = charges_output.get_charges(build)
    if charges_str:
        embed.add_field(name="Charges", value=charges_str, inline=minified)

    # if not minified, add detailed info
    if not minified:
        skill = skill_output.get_main_skill(build)
        if skill:
            embed.add_field(name="Main Skill", value=skill, inline=minified)

        conf_str = config_output.get_config_string(build.config)
        if conf_str:
            embed.add_field(name="Config", value=conf_str, inline=minified)

    # output
    embed.add_field(name='Tree:', value=build.tree)
    return embed
def init_build():
    if request.method == 'POST':
        path = request.form['path']
        Logger.get().info("Test request: %s" % (path, ))
        build = Build(path, g.user)
        build.save()
        datosjson = {"id": build.id, "path": build.path}
        Logger.get().info("Adding the request to the message queue")
        beanstalk.put(json.dumps(datosjson))
    else:
        Logger.get().info("Build listing requested by user %s" % (g.user.name, ))
        orderby = request.args.get('order', 'date')
        orderformat = request.args.get('list', 'desc')
        filterpath = request.args.get('path', None)
        filterfinished = request.args.get('finished', None)
        orderby = order_translate.get(orderby, Build.date)
        if orderformat == 'desc':
            orderby = orderby.desc()
        builds = Build.query.filter_by(user_id=g.user.id)
        if filterpath:
            builds = builds.filter_by(path=filterpath)
        if filterfinished:
            builds = builds.filter_by(finished=filterfinished)
        builds = builds.order_by(orderby).all()
        results = []
        for bid in builds:
            results.append(bid.jsonrep(True))
        return Response(json.dumps(results), status=200, mimetype="application/json")
    return jsonify(build.id)
def get_builds():
    # gets all builds in the DB using the .find() method
    builds = []
    for build in Build.find():
        builds.append(build.to_json())
    # returns data for all builds
    return json.dumps(builds), 200
def add_build():
    pool_id = request.get_json().get('pool')
    cards = request.get_json().get('cards')
    pool = Pool.query.filter_by(id=pool_id).first()
    build = Build(cards, pool)
    db.session.add(build)
    db.session.commit()
    return 'Build Added %s' % (build)
def get_all_builds(subpath):
    path = "../h5ai/" + subpath
    build_list = []
    i = 0

    # Iterate through all builds & generate model list
    for filename in os.listdir(path):
        # Filter only APKs; it is unlikely any other file type will appear, but still
        if filename.endswith(".apk"):
            relative_path = os.path.join(path, filename)
            timestamp = os.path.getmtime(relative_path)
            size = os.path.getsize(relative_path)

            # Remove letter chars for tag names
            if path.endswith("Nightly"):
                tag = re.sub("[^0-9]", "", filename)
            elif filename.startswith("AppWarden"):
                tag = re.search("_v(.*).apk", filename)[1]
            else:
                tag = re.search("_(.*).apk", filename)[1]

            # Calculate hashes
            md5 = hasher("md5", filename, path)
            sha256 = hasher("sha256", filename, path)

            # Get Gitlab releases url (default to None for unrecognized app names)
            gitlab = None
            if filename.startswith("AuroraStore"):
                gitlab = constants.RELEASES_URL.format("AuroraStore", tag)
            if filename.startswith("AuroraDroid"):
                gitlab = constants.RELEASES_URL.format("AuroraDroid", tag)
            if filename.startswith("AppWarden"):
                gitlab = constants.RELEASES_URL.format("AppWarden", tag)
            if filename.startswith("AuroraWalls"):
                gitlab = constants.RELEASES_URL.format("AuroraWallpapers", tag)

            # Parse file as build
            build = Build(
                id=i,
                name=filename,
                tag_name=tag,
                timestamp=timestamp,
                size=size,
                md5_hash=md5,
                sha256_hash=sha256,
                download_url="{}/{}/{}".format(constants.DL_URL, subpath, filename),
                gitlab_url=gitlab,
            )
            i = i + 1

            # Add build to available build list
            build_list.append(build)

    if not build_list:
        return jsonify("List is empty!")
    else:
        return build_list
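# Standalone sketch of the tag-extraction expressions used in get_all_builds above; the
# filenames are invented to show which part of each name ends up as the tag.
import re

assert re.sub("[^0-9]", "", "AuroraStore_Nightly_20200917.apk") == "20200917"
assert re.search("_v(.*).apk", "AppWarden_v1.0.2.apk")[1] == "1.0.2"
assert re.search("_(.*).apk", "AuroraStore_4.0.7.apk")[1] == "4.0.7"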
def get(self):
    self.response.headers['Content-Type'] = 'application/json; charset=utf-8'

    try:
        testId = int(self.request.get('id', 0))
        branchId = int(self.request.get('branchid', 0))
        platformId = int(self.request.get('platformid', 0))
    except TypeError:
        # FIXME: Output an error here
        testId = 0
        branchId = 0
        platformId = 0

    # FIXME: Just fetch builds specified by "days"
    # days = self.request.get('days', 365)

    cacheKey = Test.cacheKey(testId, branchId, platformId)
    cache = memcache.get(cacheKey)
    if cache:
        self.response.out.write(cache)
        return

    builds = Build.all()
    builds.filter('branch =', modelFromNumericId(branchId, Branch))
    builds.filter('platform =', modelFromNumericId(platformId, Platform))

    test = modelFromNumericId(testId, Test)
    testName = test.name if test else None

    test_runs = []
    averages = {}
    values = []
    timestamps = []

    for build in builds:
        results = TestResult.all()
        results.filter('name =', testName)
        results.filter('build =', build)
        for result in results:
            builderId = build.builder.key().id()
            posixTimestamp = mktime(build.timestamp.timetuple())
            test_runs.append([result.key().id(),
                              [build.key().id(), build.buildNumber, build.revision],
                              posixTimestamp, result.value, 0, [], builderId])
            # FIXME: Calculate the average; in practice, we wouldn't have more than one value for a given revision
            averages[build.revision] = result.value
            values.append(result.value)
            timestamps.append(posixTimestamp)

    result = json.dumps({
        'test_runs': test_runs,
        'averages': averages,
        'min': min(values) if values else None,
        'max': max(values) if values else None,
        'date_range': [min(timestamps), max(timestamps)] if timestamps else None,
        'stat': 'ok'})

    self.response.out.write(result)
    memcache.add(cacheKey, result)
def post(self):
    self.response.headers['Content-Type'] = 'text/plain; charset=utf-8'

    try:
        test_id = int(self.request.get('id', 0))
        branch_id = int(self.request.get('branchid', 0))
        platform_id = int(self.request.get('platformid', 0))
    except TypeError:
        # FIXME: Output an error here
        test_id = 0
        branch_id = 0
        platform_id = 0

    # FIXME: Just fetch builds specified by "days"
    # days = self.request.get('days', 365)

    builds = Build.all()
    builds.filter('branch =', modelFromNumericId(branch_id, Branch))
    builds.filter('platform =', modelFromNumericId(platform_id, Platform))

    test = modelFromNumericId(test_id, Test)
    test_name = test.name if test else None

    test_runs = []
    averages = {}
    values = []
    timestamps = []

    for build in builds:
        results = TestResult.all()
        results.filter('name =', test_name)
        results.filter('build =', build)
        for result in results:
            builderId = build.builder.key().id()
            posixTimestamp = mktime(build.timestamp.timetuple())
            statistics = None
            if result.valueStdev is not None and result.valueMin is not None and result.valueMax is not None:
                statistics = {'stdev': result.valueStdev, 'min': result.valueMin, 'max': result.valueMax}
            test_runs.append([result.key().id(),
                              [build.key().id(), build.buildNumber, build.revision],
                              posixTimestamp,
                              result.value,
                              0,  # runNumber
                              [],  # annotations
                              builderId,
                              statistics])
            # FIXME: Calculate the average; in practice, we wouldn't have more than one value for a given revision
            averages[build.revision] = result.value
            values.append(result.value)
            timestamps.append(posixTimestamp)

    result = json.dumps({
        'test_runs': test_runs,
        'averages': averages,
        'min': min(values) if values else None,
        'max': max(values) if values else None,
        'date_range': [min(timestamps), max(timestamps)] if timestamps else None,
        'stat': 'ok'})

    cache_runs(test_id, branch_id, platform_id, result)
    self.response.out.write('OK')
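# Standalone sketch of the posixTimestamp conversion used in the two handlers above:
# time.mktime interprets a naive datetime's timetuple as local time and returns seconds
# since the epoch as a float; the date below is arbitrary.
from datetime import datetime
from time import mktime

_posix_timestamp = mktime(datetime(2013, 2, 8, 10, 55, 20).timetuple())
assert isinstance(_posix_timestamp, float)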
def blame():
    bad_people = dict()
    for build in Build.find():
        if build.status == 2:
            if bad_people.get(build.author.name, None) is None:
                bad_people[build.author.name] = 1
            else:
                bad_people[build.author.name] += 1
    return jsonify(bad_people)
def _generate_runs(branch, platform, test_name):
    builds = Build.all()
    builds.filter('branch =', branch)
    builds.filter('platform =', platform)

    for build in builds:
        results = TestResult.all()
        results.filter('name =', test_name)
        results.filter('build =', build)
        for result in results:
            yield build, result
    # falling off the end of the generator terminates iteration; no explicit StopIteration is needed
def test_release_builder_hiding(client):
    session = test_context._app.db.session(DB_DECLARATIVE_BASE)

    buildername = BUILDER_REL_PREFIX + 'test'
    release_build = Build(branch='branch', builddir='test', buildername=buildername)
    session.add(release_build)
    session.commit()

    rv = client.get('/clobberer/lastclobber/branch/by-builder/branch')
    eq_(rv.status_code, 200)
    clobbertimes = json.loads(rv.data)["result"]
    eq_(clobbertimes.get(buildername), None)
def test_release_branch_hiding(client):
    session = test_context._app.db.session(DB_DECLARATIVE_BASE)

    # clear all the old branches
    session.query(Build).delete()
    session.commit()

    # users should not see this branch because it's associated with a release builddir
    release_builddir = '{}builddir'.format(BUILDDIR_REL_PREFIX)
    session.add(Build(branch='see-no-evil', builddir=release_builddir))
    session.commit()

    rv = client.get('/clobberer/branches')
    eq_(json.loads(rv.data)['result'], [])
def get_build(build_id):
    # looks up the build by its id in the database;
    # the variable build_id is passed in the url
    try:
        build = Build.get_from_id(ObjectId(build_id))
    except Exception:
        return jsonify(error="Invalid Build ID")
    if build is None:
        return jsonify(error="Invalid Build ID")
    # returns the build's data (including its status) as JSON
    return (build.to_json(), 200)
def get_secondary_def(build: Build):
    """
    Parse all secondary defenses such as armour, evasion, dodge and block and display them
    if they are higher than the thresholds.

    :param build: current build
    :return: string containing noteworthy secondary defenses, or None if nothing is noteworthy
    """
    output = ""
    stats = []

    effective_life = max(filter(None.__ne__, [
        build.get_stat('Player', 'Life'),
        build.get_stat('Player', 'EnergyShield'),
    ]))

    armour = build.get_stat('Player', 'Armour', min(OutputThresholds.ARMOUR.value, effective_life))
    if armour:
        stats.append("Armour: {:.0f}".format(armour))

    evasion = build.get_stat('Player', 'Evasion', min(OutputThresholds.EVASION.value, effective_life))
    if evasion:
        stats.append("Evasion: {:.0f}".format(evasion))

    dodge = build.get_stat('Player', 'AttackDodgeChance', OutputThresholds.DODGE.value)
    if dodge:
        stats.append("Dodge: {:.0f}%".format(dodge))

    spell_dodge = build.get_stat('Player', 'SpellDodgeChance', OutputThresholds.SPELL_DODGE.value)
    if spell_dodge:
        stats.append("Spell Dodge: {:.0f}%".format(spell_dodge))

    block = build.get_stat('Player', 'BlockChance', OutputThresholds.BLOCK.value)
    if block:
        stats.append("Block: {:.0f}%".format(block))

    spell_block = build.get_stat('Player', 'SpellBlockChance', OutputThresholds.SPELL_BLOCK.value)
    if spell_block:
        stats.append("Spell Block: {:.0f}%".format(spell_block))

    if stats:
        output += " | ".join(stats) + "\n"
    return ("**Secondary:** " + output) if output != "" else None
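# Minimal illustration of the effective_life computation in get_secondary_def above:
# filter(None.__ne__, ...) drops None entries (None.__ne__(x) is falsy only for x = None)
# before max() is applied. The numbers are made up.
_life, _energy_shield = 5400, None
assert max(filter(None.__ne__, [_life, _energy_shield])) == 5400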
def rebuild():
    # takes the build ID as a parameter
    id = request.form.get('build_id', None)

    # looks up a build by that ID
    try:
        build = Build.get_from_id(ObjectId(id))
    except Exception:
        return jsonify(error="Invalid Build ID")
    if build is None:
        return jsonify(error="Invalid Build ID")

    # rebuilds the build to see if it fails new tests
    api.queue.add_build(build)
    if api.worker is None or api.worker.current_build is None:
        api.worker = WorkerThread(api.queue, api.config, connection)
        api.worker.start()
    return jsonify(success=True, id=id)
def post(self):
    self.response.headers['Content-Type'] = 'text/plain; charset=utf-8'

    log_id = int(self.request.get('id', 0))
    log = ReportLog.get_by_id(log_id)
    if not log or not log.commit:
        self.response.out.write("Not processed")
        return

    branch = log.branch()
    platform = log.platform()
    build = Build.get_or_insert_from_log(log)

    for test_name, result_value in log.results().iteritems():
        unit = result_value.get('unit') if isinstance(result_value, dict) else None
        test = Test.update_or_insert(test_name, branch, platform, unit)
        result = TestResult.get_or_insert_from_parsed_json(test_name, build, result_value)
        if not result:
            continue

        runs = Runs.get_by_objects(branch, platform, test)
        regenerate_runs = True
        if runs:
            runs.update_incrementally(build, result)
            regenerate_runs = False
        schedule_runs_update(test.id, branch.id, platform.id, regenerate_runs)

    log = ReportLog.get(log.key())
    log.delete()

    # We need to update the dashboard and manifest because they are affected by the existence of test results
    schedule_dashboard_update()
    schedule_manifest_update()

    self.response.out.write('OK')
def parse_build(xml_root):
    xml_build = xml_root.find('Build')
    xml_items = xml_root.find('Items')
    xml_skills = xml_root.find('Skills')
    xml_tree = xml_root.find('Tree')
    selected_tree = Parser.get_tree_link(xml_tree)

    # parse items
    item_slots = Parser.parse_item_slots(xml_items)
    skills = Parser.parse_skills(xml_skills)
    active_skill = xml_build.attrib['mainSocketGroup']

    build = Build(xml_build.attrib['level'], xml_build.attrib['targetVersion'],
                  Parser.get_attrib_if_exists(xml_build, 'bandit'),
                  xml_build.attrib['className'], xml_build.attrib['ascendClassName'],
                  selected_tree, skills, active_skill, item_slots)

    for player_stat in xml_build:
        if 'stat' in player_stat.attrib and 'value' in player_stat.attrib:
            build.append_stat(player_stat.attrib['stat'], player_stat.attrib['value'], player_stat.tag)
        else:
            log.info("Encountered unsupported player stat: k={}, v={}".format(
                player_stat.tag, player_stat.attrib))

    # parse config
    for config_input in xml_root.find('Config'):
        if config_input.tag == "Input":
            extracted = [val for (key, val) in config_input.attrib.items()]
            # an Input needs at least a name and a value
            if len(extracted) < 2:
                continue
            build.append_conf(extracted[0], extracted[1])
    return build
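# Hedged sketch of the Config/Input extraction in parse_build above, assuming an <Input/>
# element whose attributes are roughly a name plus a single value; the attribute dict here
# is invented for illustration.
_attrib = {"name": "conditionEnemyShocked", "boolean": "true"}
_extracted = [val for (key, val) in _attrib.items()]
assert _extracted[0] == "conditionEnemyShocked" and _extracted[1] == "true"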
# given the build text, expect parsed items
from models import Build

trigger_details = """myusername
2016-09-13 10:43:13 CDT
DETAIL: STACK: api
"""

execution_summary = """Status: SUCCEEDED
Duration: 11:13
"""

build = Build(trigger_details, execution_summary)

assert build.username == "myusername"
assert build.status == "SUCCEEDED"
assert build.status_is_valid()
assert build.duration == "11:13"
assert str(build.datetime_started) == "2016-09-13 10:43:13"
def get_defense_string(build: Build):
    output = ""

    life_percent_threshold = min(OutputThresholds.LIFE_PERCENT.value,
                                 OutputThresholds.LIFE_PERCENT_PER_LEVEL.value * build.level)
    life_string = get_basic_line("Life",
                                 build.get_stat('Player', 'Life'),
                                 build.get_stat('Player', 'Spec:LifeInc', life_percent_threshold),
                                 basic_stat_regen=build.get_stat('Player', 'LifeRegen'),
                                 stat_unreserved=build.get_stat('Player', 'LifeUnreserved'))
    if life_string:
        output += life_string

    es_percent_threshold = min(OutputThresholds.ES_PERCENT.value,
                               OutputThresholds.ES_PERCENT_PER_LEVEL.value * build.level)
    es_string = get_basic_line("Energy Shield",
                               build.get_stat('Player', 'EnergyShield'),
                               build.get_stat('Player', 'Spec:EnergyShieldInc', es_percent_threshold),
                               basic_stat_regen=build.get_stat('Player', 'EnergyShieldRegen'))
    if es_string:
        output += es_string

    net_regen = build.get_stat('Player', 'NetLifeRegen')
    if net_regen:
        output += "**Net Regen**: {:.0f}/s\n".format(net_regen)

    mana_string = get_basic_line("Mana",
                                 build.get_stat('Player', 'Mana'),
                                 build.get_stat('Player', 'Spec:ManaInc'),
                                 basic_stat_regen=build.get_stat('Player', 'ManaRegen'),
                                 stat_unreserved=build.get_stat('Player', 'ManaUnreserved'))
    if mana_string:
        output += mana_string

    # todo: only pass necessary values to the following options:
    secondary_def = get_secondary_def(build)
    if secondary_def:
        output += secondary_def

    output += get_resistances(build)
    return output