def sortStages(data, stat) -> None:
    """
    Print the stages sorted by the average of the given stat.

    :param data: the list of compressed job lines to analyze
    :type data: List[bytes]
    :param stat: the statistic to average per stage
    :type stat: str
    """
    stageDict: Dict[str, Dict[str, Union[str, float]]] = {}
    stageList: List[Dict[str, Union[str, float]]] = []
    for line in data:
        job = Job(**ujson.loads(zlib.decompress(line)))
        if job.has_stage():
            # Hoist the repeated localized-name lookup into one local.
            stageName = getattr(
                cast(Stage_WaterLevel_KnownOccurrence, job.stage).name, locale
            )
            if stageName not in stageDict:
                stageDict[stageName] = {
                    "name": stageName,
                    stat: 0.0,
                    "count": 0.0,
                }
            entry = cast(Dict[str, float], stageDict[stageName])
            entry[stat] += cast(float, core.getValMultiDimensional(job, [stat]))
            entry["count"] += 1.0
    for stage in stageDict.values():
        stageList.append(
            {
                "name": stage["name"],
                "value": cast(float, stage[stat]) / cast(float, stage["count"]),
            }
        )
    pprint.pprint(sorted(stageList, key=lambda val: val["value"]))
def filterJobsOr(
    location,
    data: Union[str, List[bytes]],
    filterFunctions: List[Callable],
    outpath,
) -> Union[Tuple[str, str], Tuple[List[bytes], List[bytes]]]:
    """
    Split the jobs into those matching at least one filter and the rest.

    :param location: "disk" when data is a file path, otherwise in-memory
    :param data: the data file path or the list of compressed job lines
    :param filterFunctions: predicates; a job passes if any one matches
    :param outpath: suffix used to name the on-disk result files
    :return: the pair (matching, non-matching) as paths or byte lists
    """
    if location == "disk":
        outWith = cast(str, data[:-6]) + "/" + outpath + ".jl.gz"
        outWithout = cast(str, data[:-6]) + "/not" + outpath + ".jl.gz"
        # Only rebuild the split files when either one is missing.
        if not (os.path.exists(outWith) and os.path.exists(outWithout)):
            with gzip.open(cast(str, data)) as reader:
                if hasJobs("disk", data):
                    with gzip.open(outWith, "at", encoding="utf8") as writerA:
                        with gzip.open(outWithout, "at", encoding="utf8") as writerB:
                            for line in reader:
                                job = Job(**ujson.loads(line))
                                if any(funct(job) for funct in filterFunctions):
                                    json.dump(
                                        job, writerA, default=lambda x: x.__dict__
                                    )
                                    writerA.write("\n")
                                else:
                                    json.dump(
                                        job, writerB, default=lambda x: x.__dict__
                                    )
                                    writerB.write("\n")
        return (outWith, outWithout)
    jobsWith: List[bytes] = []
    jobsWithout: List[bytes] = []
    for jobLine in cast(List[bytes], data):
        job = Job(**ujson.loads(zlib.decompress(jobLine)))
        if any(funct(job) for funct in filterFunctions):
            jobsWith.append(jobLine)
        else:
            jobsWithout.append(jobLine)
    return (jobsWith, jobsWithout)
def job():
    """
    Flask handler for the job endpoint: create on POST, fetch on GET.

    Returns None for any other method so Flask reports an error.
    """
    if request.method == 'POST':
        return handle_response(Job(**request.json).put())
    if request.method == 'GET':
        try:
            # The lookup stays inside the try so a missing/None JSON body
            # (AttributeError) is handled the same as a missing uuid.
            return Job.get(request.json.get('uuid')).to_dict()
        except (AttributeError, KeyError):
            return make_response(
                f"Unable to find item with uuid {request.json.get('uuid')}", 500)
    return None
def sortSpecial(data, stat) -> None:
    """
    Print the special weapons sorted by the average of the given stat.

    :param data: the list of compressed job lines to analyze
    :type data: List[bytes]
    :param stat: the statistic to average per special
    :type stat: str
    """
    totals: Dict[str, Dict[str, Union[str, float]]] = {}
    for line in data:
        job = Job(**ujson.loads(zlib.decompress(line)))
        specialName = getattr(job.my_data.special.name, locale)
        if specialName not in totals:
            totals[specialName] = {
                "name": specialName,
                stat: 0.0,
                "count": 0.0,
            }
        entry = cast(Dict[str, float], totals[specialName])
        entry[stat] += cast(float, core.getValMultiDimensional(job, [stat]))
        entry["count"] += 1.0
    specialList: List[Dict[str, Union[str, float]]] = [
        {
            "name": special["name"],
            "value": cast(float, special[stat]) / cast(float, special["count"]),
        }
        for special in totals.values()
    ]
    pprint.pprint(sorted(specialList, key=lambda val: val["value"]))
def waveClearPercentageWithWeapon(location, data, weapon):
    """
    Find the fraction of waves cleared while the player used the weapon.

    :param location: "disk" when data is a file path, otherwise in-memory
    :param data: the data file path or the list of compressed job lines
    :type data: Union[str, List[bytes]]
    :param weapon: the weapon key or localized name
    :type weapon: str
    :return: cleared waves with the weapon / waves played with the weapon
    :rtype: float
    """
    if location == "disk":
        reader = gzip.open(data)
    else:
        reader = data
    sumVal: float = 0.0
    count: float = 0.0
    for line in reader:
        if location == "disk":
            job = Job(**ujson.loads(line))
        else:
            job = Job(**ujson.loads(zlib.decompress(line)))
        weapons = job.my_data.weapons

        def matches(index: int) -> bool:
            # True when the player's weapon for the given wave matches by
            # key or localized name.
            return len(weapons) > index and weapon in (
                weapons[index].key,
                getattr(weapons[index].name, locale),
            )

        # BUG FIX: the original subscripted job["clear_waves"] on wave one;
        # Job is an object, so attribute access is required as on the other
        # two waves.
        sumVal += int(
            (matches(0) and job.clear_waves > 0)
            or (matches(1) and job.clear_waves > 1)
            or (matches(2) and job.clear_waves > 2)
        )
        count += int(matches(0) or matches(1) or matches(2))
    if location == "disk":
        reader.close()
    return sumVal / count
def getArrayOfStat(location, data: Union[str, List[bytes]], stat) -> List[float]:
    """
    Collect all the values of a single stat for a given list of jobs.

    :param data: the full path to the data file
    :type data: str
    :param stat: the stat to retrieve
    :type stat: str
    :return: the stat for each job in the data
    :rtype: List[float]

    :Example:

    >>> import core
    >>> min(core.getArrayOfStat("data/salmonAll.jl.gz", "quota 0"))
    3.0
    """
    if location == "disk":
        reader: Union[GzipFile, List[bytes]] = gzip.open(cast(str, data))
    else:
        reader = cast(List[bytes], data)
    results: List[float] = []
    for line in reader:
        if location == "disk":
            job = Job(**ujson.loads(line))
        else:
            job = Job(**ujson.loads(zlib.decompress(line)))
        # Translate e.g. "quota 0" into the key path ["quota", 0].
        keyPath = [int(part) if part.isdigit() else part for part in stat.split()]
        results.append(
            float(
                getValMultiDimensional(
                    job, cast(List[Union[str, int]], keyPath)
                )
            )
        )
    if location == "disk":
        cast(GzipFile, reader).close()
    return results
def listAllUsers(location, data) -> List[str]:
    """
    List every unique splatnet ID that appears as the job owner.

    :param location: "disk" when data is a file path, otherwise in-memory
    :param data: the data file path or the list of compressed job lines
    :return: the unique splatnet IDs, in first-seen order
    :rtype: List[str]
    """
    result: List[str] = []
    reader = gzip.open(data) if location == "disk" else data
    for line in reader:
        if location == "disk":
            job = Job(**ujson.loads(line))
        else:
            job = Job(**ujson.loads(zlib.decompress(line)))
        userId = job.my_data.splatnet_id
        if userId not in result:
            result.append(userId)
    if location == "disk":
        reader.close()
    return result
def findPlayerIdByName(location, data, player) -> List[str]:
    """
    Find all the recorded player IDs for a given player name.

    :param data: the full name of the data file
    :type data: str
    :param player: the player name to find
    :type player: str
    :return: the list of found player IDs
    :rtype: List[str]
    :raises gzip.BadGzipFile: if the file exists but isn't a gzip file
    :raises FileNotFoundError: if the file doesn't exist

    >>> import core
    >>> core.findPlayerIdByName("data/salmonAll.jl.gz", "CassTheFae")
    ['aeda69d2070fafb6']
    """
    foundIds: List[str] = []
    reader = gzip.open(data) if location == "disk" else data
    for line in reader:
        if location == "disk":
            job = Job(**ujson.loads(line))
        else:
            job = Job(**ujson.loads(zlib.decompress(line)))
        # Check the job owner first, then each teammate, de-duplicating.
        if job.my_data.name == player and job.my_data.splatnet_id not in foundIds:
            foundIds.append(job.my_data.splatnet_id)
        for teammate in job.teammates or []:
            if teammate.name == player and teammate.splatnet_id not in foundIds:
                foundIds.append(teammate.splatnet_id)
    if location == "disk":
        reader.close()
    return foundIds
def sortRotation(data, stat) -> None:
    """
    Print the sorted rotations by the average of the given stat.

    :param data: the list of compressed job lines to analyze
    :type data: List[bytes]
    :param stat: the statistic to sort by
    :type stat: str
    """
    rotationList: List[int] = []
    rotationResultsList: List[
        Dict[str, Union[int, float, Union[None, Dict[str, Union[str, List[str]]]]]]
    ] = []
    for line in data:
        job = Job(**ujson.loads(zlib.decompress(line)))
        if job.shift_start_at.time not in rotationList:
            rotationList.append(job.shift_start_at.time)
    for rotation in rotationList:
        print(rotation)
        result: Dict[
            str, Union[int, float, Union[None, Dict[str, Union[str, List[str]]]]]
        ] = {}
        filterPaths: Tuple[List[bytes], List[bytes]] = cast(
            Tuple[List[bytes], List[bytes]],
            filters.duringRotationInts("mem", data, [rotation]),
        )
        withVal: List[bytes] = filterPaths[0]
        withoutVal: List[bytes] = filterPaths[1]
        # BUG FIX: the original nested two identical hasJobs("mem", withVal)
        # checks; a single combined check is sufficient.
        if hasJobs("mem", withVal) and hasJobs("mem", withoutVal):
            result["name"] = rotation
            result["data"] = core.findWeaponsAndStageByRotation(
                "mem", withVal, rotation
            )
            # Score a rotation by how much better (or worse) the stat is
            # during that rotation than outside it.
            result["value"] = (
                statSummary("mem", withVal, stat)[0]
                - statSummary("mem", withoutVal, stat)[0]
            )
            rotationResultsList.append(result)
    pprint.pprint(
        sorted(rotationResultsList, key=lambda val: cast(float, val["value"]))
    )
def getSingleJob(data, index=0) -> Optional[Job]:
    """
    Fetch a single job from the data file by position.

    :param data: the full name of the data file
    :type data: str
    :param index: the index in the list of the job to find
    :type index: int
    :return: either the found job or None if there isn't a job at that index
    :rtype: Optional[Job]
    :raises gzip.BadGzipFile: if the file exists but isn't a gzip file
    :raises FileNotFoundError: if the file doesn't exist
    """
    with gzip.open(data) as reader:
        for lineNumber, line in enumerate(reader):
            if lineNumber == index:
                return Job(**ujson.loads(line))
    return None
def list_jobs():
    """Return the JSON-serialized list of all stored jobs."""
    return jsonify_objects(Job.list())
def create_jobs_table():
    """Create the jobs table and return the wrapped response."""
    return handle_response(Job.create_table())
}, "rush": { "key": "rush", "count": 0.0 }, "cohock_charge": { "key": "cohock_charge", "count": 0.0 }, "griller": { "key": "griller", "count": 0.0 }, "goldie_seeking": { "key": "goldie_seeking", "count": 0.0 }, } total = 0.0 with gzip.open(data) as reader: for line in reader: job = Job(**ujson.loads(line)) for wave in job.waves: total += 1.0 if wave.known_occurrence is not None: eventDict[wave.known_occurrence.key]["count"] += 1.0 else: eventDict["None"]["count"] += 1.0 for event in eventDict.values(): print(event["key"] + ": " + str(event["count"] / total))
import core from core import locale from objects import Job import numpy as np from scipy.stats import ttest_ind from typing import List, cast, Tuple import matplotlib.pyplot as plt import ujson import zlib import filters dataFile: str = core.init("All", "data") data: List[bytes] = core.loadJobsFromFile(dataFile) stageList: List[str] = [] for line in data: job: Job = Job(**ujson.loads(zlib.decompress(line))) if job.stage is not None: if not (getattr(job.stage.name, locale) in stageList): stageList.append(getattr(job.stage.name, locale)) listOfData: List[Tuple[List[bytes], List[bytes]]] = [] for stage in stageList: listOfData.append( cast(Tuple[List[bytes], List[bytes]], filters.onStages("mem", data, [stage])) ) with open("reports/stages.txt", "w", encoding="utf-8") as writer: i: int = 1 for stageFiles in listOfData: plt.figure(i) withVal: List[bytes] = stageFiles[0] withoutVal: List[bytes] = stageFiles[1] withValClearWaves: List[float] = []
def findRotationByWeaponsAndStage(
    location: str, data: Union[str, List[bytes]], **kargs
) -> List[int]:
    """
    Find the rotation IDs for a rotation of the given weapons and stage in the
    given data file.

    :param data: str: the full path of the data file
    :type data: str
    :param weapons: the chosen weapons
    :type weapons: Union[Tuple[str, str, str, str], List[str]]
    :param stage: str: the chosen stage
    :type stage: str
    :return: a list of rotation IDs
    :rtype: List[int]
    :raises gzip.BadGzipFile: if the file exists but isn't a gzip file
    :raises FileNotFoundError: if the file doesn't exist

    :Example:

    >>> import core
    >>> findRotationByWeaponsAndStage(
    ...     "data/salmon.jl.gz",
    ...     (
    ...         "Grizzco Charger",
    ...         "Grizzco Blaster",
    ...         "Grizzco Slosher",
    ...         "Grizzco Brella"
    ...     ),
    ...     "Ruins of Ark Polaris"
    ... )
    [1563537600, 1607752800]
    """

    def weaponMatches(weaponList, weapon) -> bool:
        # True when any of the (at most three) weapons matches by key or
        # localized name.
        return any(
            weapon in (w.key, getattr(w.name, locale)) for w in weaponList[:3]
        )

    def jobHasWeapon(job, weapon) -> bool:
        # Check the owner's weapons, then each teammate's (when present).
        # BUG FIX: the original guarded the third teammate's name check
        # with len(job.teammates) > 0 before indexing teammates[2], risking
        # an IndexError; iterating avoids indexing entirely.
        if weaponMatches(job.my_data.weapons, weapon):
            return True
        for teammate in job.teammates or []:
            if teammate.weapons is not None and weaponMatches(
                teammate.weapons, weapon
            ):
                return True
        return False

    foundRotations: List[int] = []
    if location == "disk":
        reader: Union[GzipFile, List[bytes]] = gzip.open(cast(str, data))
    else:
        reader = cast(List[bytes], data)
    for line in reader:
        if location == "disk":
            job = Job(**ujson.loads(line))
        else:
            job = Job(**ujson.loads(zlib.decompress(line)))
        # The stage must match (by key or localized name) when requested.
        found = kargs.get("stage") is None or (
            job.stage is not None
            and kargs.get("stage")
            in (job.stage.key, getattr(job.stage.name, locale))
        )
        # Every requested weapon must be carried by someone in the job.
        if kargs.get("weapons") is not None:
            for weapon in cast(List[str], kargs.get("weapons")):
                found = found and jobHasWeapon(job, weapon)
        if found and job.shift_start_at.time not in foundRotations:
            foundRotations.append(job.shift_start_at.time)
    if location == "disk":
        cast(GzipFile, reader).close()
    return foundRotations
def getOverview(location, data) -> str:
    """
    Build a text summary (counts, clear rates, and per-stat average,
    min, median, max) of the jobs.

    :param location: "disk" when data is a file path, otherwise in-memory
    :param data: the data file path or the list of compressed job lines
    :return: the multi-line overview report
    :rtype: str
    """
    result = ""
    if location == "disk":
        result = data + "\n"
        reader = gzip.open(data)
    else:
        reader = data
    stats = [
        "clear_waves",
        "my_data golden_egg_delivered",
        "my_data power_egg_collected",
        "my_data rescue",
        "my_data death",
        "danger_rate",
    ]
    clearCount: float = 0.0
    waveTwoCount: float = 0.0
    waveOneCount: float = 0.0
    sumVal: List[float] = [0.0] * 6
    maxVal: List[float] = [0.0] * 6
    minVal: List[float] = [sys.float_info.max] * 6
    vals: List[List[float]] = [[], [], [], [], [], []]
    count: int = 0
    for line in reader:
        if location == "disk":
            job = Job(**ujson.loads(line))
        else:
            job = Job(**ujson.loads(zlib.decompress(line)))
        count += 1
        clearCount += float(job.clear_waves == 3)
        waveTwoCount += float(job.clear_waves >= 2)
        waveOneCount += float(job.clear_waves >= 1)
        for i in range(len(stats)):
            val = float(
                getValMultiDimensional(
                    job,
                    cast(List[Union[str, int]], stats[i].split()),
                ))
            sumVal[i] += val
            maxVal[i] = max(maxVal[i], val)
            minVal[i] = min(minVal[i], val)
            vals[i].append(val)
    result += "Jobs: " + str(count) + "\n"
    result += "Average Waves: " + str(sumVal[0] / count) + "\n"
    result += "Clear %: " + str(clearCount / count) + "\n"
    result += "Wave 2 %: " + str(waveTwoCount / count) + "\n"
    result += "Wave 1 %: " + str(waveOneCount / count) + "\n"
    # BUG FIX: the "Golden" line was missing its closing parenthesis,
    # unlike every other (min, median, max) line below.
    result += "Golden: {} ({}, {}, {})\n".format(
        sumVal[1] / count, minVal[1], np.median(vals[1]), maxVal[1])
    result += "Power Eggs: {} ({}, {}, {})\n".format(
        sumVal[2] / count, minVal[2], np.median(vals[2]), maxVal[2])
    result += "Rescued: {} ({}, {}, {})\n".format(
        sumVal[3] / count, minVal[3], np.median(vals[3]), maxVal[3])
    result += "Deaths: {} ({}, {}, {})\n".format(
        sumVal[4] / count, minVal[4], np.median(vals[4]), maxVal[4])
    result += "Hazard Level: {} ({}, {}, {})\n".format(
        sumVal[5] / count, minVal[5], np.median(vals[5]), maxVal[5])
    return result
def statSummary(location, data: Union[str, List[bytes]], stats) -> dict:
    """
    Find summary statistics (min, max, mean, median, standard deviation,
    sum, count) for each requested stat in the data.

    :param location: "disk" when data is a file path, otherwise in-memory
    :param data: the full file path of the data file or the job lines
    :param stats: the stats to summarize
    :type stats: List[str]
    :return: a dict mapping each stat to its summary values
    :rtype: Dict[str, Dict[str, float]]
    """
    if location == "disk":
        reader: Union[GzipFile, List[bytes]] = gzip.open(cast(str, data))
    else:
        reader = cast(List[bytes], data)
    statDict: Dict[str, Dict[str, Union[float, List[float]]]] = {}
    resultDict: Dict[str, Dict[str, float]] = {}
    for stat in stats:
        statDict[stat] = {
            "sum_val": 0.0,
            "max_val": 0.0,
            "min_val": sys.float_info.max,
            "vals": [],
            "count": 0.0,
        }
        resultDict[stat] = {
            "min_val": sys.float_info.max,
            "max_val": 0.0,
            "count": 0.0,
            "median": 0.0,
            "standard_deviation": 0.0,
            "mean": 0.0,
            "sum": 0.0,
        }
    for line in reader:
        if location == "disk":
            job = Job(**ujson.loads(line))
        else:
            job = Job(**ujson.loads(zlib.decompress(line)))
        for stat in statDict:
            # Translate e.g. "quota 0" into the key path ["quota", 0].
            val = float(
                getValMultiDimensional(
                    job,
                    [int(ele) if ele.isdigit() else ele for ele in stat.split()],
                ))
            cast(Dict[str, float], statDict[stat])["sum_val"] += val
            cast(Dict[str, float], statDict[stat])["count"] += 1.0
            statDict[stat]["max_val"] = max(statDict[stat]["max_val"], val)
            statDict[stat]["min_val"] = min(statDict[stat]["min_val"], val)
            cast(List[float], statDict[stat]["vals"]).append(val)
    if location == "disk":
        cast(GzipFile, reader).close()
    for stat in stats:
        resultDict[stat]["min_val"] = cast(float, statDict[stat]["min_val"])
        resultDict[stat]["max_val"] = cast(float, statDict[stat]["max_val"])
        resultDict[stat]["mean"] = cast(float, statDict[stat]["sum_val"]) / cast(
            float, statDict[stat]["count"])
        resultDict[stat]["standard_deviation"] = np.std(statDict[stat]["vals"])
        resultDict[stat]["median"] = np.median(statDict[stat]["vals"])
        resultDict[stat]["sum"] = cast(float, statDict[stat]["sum_val"])
        resultDict[stat]["count"] = cast(float, statDict[stat]["count"])
    return resultDict
def findWeaponsAndStageByRotation(
        location, data, rotation) -> Dict[str, Union[str, List[str]]]:
    """
    Find the weapons and stage for a given rotation.

    :param data: the full path of the data file
    :type data: str
    :param rotation: the unix time of the rotation start
    :type rotation: int
    :return: the weapons and stage for that rotation
    :rtype: Dict[str, Union[str, List[str]]]
    :raises gzip.BadGzipFile: if the file exists but isn't a gzip file
    :raises FileNotFoundError: if the file doesn't exist

    :Example:

    >>> import core
    >>> core.findWeaponsAndStageByRotation(
    ...     "data/salmonAll.jl.gz",
    ...     1607752800
    ... )
    {
        'stage': 'Ruins of Ark Polaris',
        'weapons': [
            'Grizzco Brella',
            'Grizzco Blaster',
            'Grizzco Charger',
            'Grizzco Slosher'
        ]
    }
    """
    result: Dict[str, Union[str, List[str]]] = {}
    if location == "disk":
        reader = gzip.open(data)
    else:
        reader = data
    for line in reader:
        if location == "disk":
            job = Job(**ujson.loads(line))
        else:
            job = Job(**ujson.loads(zlib.decompress(line)))
        if job.shift_start_at.time == rotation:
            if isinstance(job.stage, Stage_WaterLevel_KnownOccurrence):
                result["stage"] = getattr(job.stage.name, locale)
            result["weapons"] = []
            if job.my_data.weapons is not None:
                for weapon in job.my_data.weapons:
                    if getattr(weapon.name, locale) not in result["weapons"]:
                        cast(Dict[str, List[str]], result)["weapons"].append(
                            getattr(weapon.name, locale))
            # BUG FIX: guard against teammates being None before iterating,
            # as every other function in the file does.
            if job.teammates is not None:
                for teammate in job.teammates:
                    if teammate.weapons is not None:
                        for weapon in teammate.weapons:
                            if getattr(weapon.name, locale) not in result["weapons"]:
                                cast(Dict[str, List[str]], result)["weapons"].append(
                                    getattr(weapon.name, locale))
    if location == "disk":
        reader.close()
    return result