def main():
    early_init()

    in_dev_mode = os.environ.get("DEV")
    image_server = os.environ.get("IMAGE_HOST", "")
    version = get_ssdb_version()

    application = tornado.web.Application(
        dispatch.ROUTES,
        template_path="webui",
        static_path="static",
        image_host=image_server,
        autoreload=1 if in_dev_mode else 0,
        is_dev=in_dev_mode,
        tle=tl_models.TranslationEngine(
            cached_keyed_db(private_data_path("names.csv")), use_satellite=1),
        enums=enums,
        starlight=starlight,
        tlable=endpoints.tlable,
        icon=endpoints.icon,
        audio=endpoints.audio,
        analytics=analytics.Analytics(),
        version=version)

    http_server = tornado.httpserver.HTTPServer(application, xheaders=1)

    addr = os.environ.get("ADDRESS", "0.0.0.0")
    port = int(os.environ.get("PORT", 5000))
    http_server.listen(port, addr)
    print("Ready.")
    tornado.ioloop.IOLoop.instance().start()
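# Deployment note (not from the source): main() is configured entirely via
# environment variables, so a development instance might be started as
#
#   DEV=1 PORT=5001 python app.py      # "app.py" is a hypothetical entry point
#
# DEV enables tornado autoreload and the is_dev template flag; ADDRESS and
# PORT default to 0.0.0.0:5000; IMAGE_HOST sets the image host prefix that
# templates receive.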
def load_bad_ranges_file(cls):
    br = set()
    try:
        with open(starlight.private_data_path("tlwrite_blocked_ranges.txt"), "r") as cf:
            for line in cf:
                br.add(ipaddress.ip_network(line.strip()))
    except OSError:
        pass
    cls.BAD_RANGES = br
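# A minimal sketch, not part of the source, of consulting BAD_RANGES for a
# request. "is_blocked" is a hypothetical helper; the membership test on
# ipaddress.ip_network objects is standard-library behaviour (mismatched IP
# versions simply compare as False).
def is_blocked(remote_ip, bad_ranges):
    addr = ipaddress.ip_address(remote_ip)
    # ip_network objects support "addr in network" containment tests
    return any(addr in net for net in bad_ranges)

# e.g. is_blocked(self.request.remote_ip, self.BAD_RANGES) inside a tornado
# RequestHandler whose class has run load_bad_ranges_file.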
def do_preswitch_tasks(new_db_path, old_db_path):
    subprocess.call([
        "toolchain/name_finder.py",
        starlight.private_data_path("enamdictu"),
        new_db_path,
        starlight.transient_data_path("names.csv"),
    ])

    if not os.getenv("DISABLE_HISTORY_UPDATES", None):
        subprocess.call(["toolchain/update_rich_history.py", new_db_path])

    if old_db_path:
        subprocess.call(
            ["toolchain/make_contiguous_gacha.py", old_db_path, new_db_path])
def get(self, story_id):
    if self.get_argument("names", "NO") == "YES":
        ap.load_name_map(starlight.private_data_path("names.csv"))
    else:
        ap.NAME_MAP = {}

    absolute = starlight.story_data_path("storydata_{0}.csv".format(story_id))
    if not os.path.exists(absolute):
        self.set_status(404)
        self.write("Script not found.")
        return

    self.set_header("Content-Type", "text/plain; charset=utf-8")
    self.write(self.banner)
    ap.format_for_wiki(absolute, lambda *x: self.write(" ".join(x) + "\n"))
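# Example request (the URL pattern is an assumption; the route registration
# isn't shown in this excerpt):
#
#   GET /wiki_fmt/1000?names=YES
#
# serves the script as text/plain wiki markup; names=YES loads the translated
# name map before formatting, any other value clears it.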
QUERY_GET_EVENT_STUBS = "SELECT id, name, type, event_start, event_end FROM event_data"
QUERY_GET_REWARDS_FOR_EVENT = "SELECT reward_id FROM event_available WHERE event_id = ? ORDER BY recommend_order"
QUERY_GET_NORMAL_GACHAS = """SELECT gacha_data.id, gacha_data.name, start_date, end_date, type, type_detail,
    gacha_rate.rare_ratio, gacha_rate.sr_ratio, gacha_rate.ssr_ratio
    FROM gacha_data LEFT JOIN gacha_rate USING (id)
    WHERE type = 3 AND type_detail = 1"""
QUERY_GET_GACHA_REWARD_META = """SELECT reward_id, limited_flag, recommend_order
    FROM gacha_available
    WHERE gacha_id = ? AND recommend_order != 0
    ORDER BY recommend_order"""
QUERY_GET_ROOTS = "SELECT id FROM card_data WHERE evolution_id != 0"
QUERY_GET_STORY_START_DATES = """SELECT card_data.id, start_date
    FROM card_data LEFT JOIN story_detail ON (open_story_id == story_detail.id)
    WHERE card_data.id IN ({0})"""
QUERY_FIND_CONTAINING_GACHA = "SELECT DISTINCT gacha_id FROM gacha_available WHERE reward_id = ? AND gacha_id IN ({0})"

ea_overrides = list(
    csvloader.load_db_file(
        starlight.private_data_path("event_availability_overrides.csv")))
overridden_events = set(x.event_id for x in ea_overrides)


def htype(x):
    # the type tag lives in the top 4 bits of a packed ID
    return (x & 0xF0000000) >> 28


def internal_id(x):
    # the low 28 bits are the bare ID
    return x & 0x0FFFFFFF


def get_overridden(event_id):
    for k, v in ea_overrides:
        if k == event_id:
            yield v


def prime_from_cursor(typename, cursor, **kwargs):
    keys = list(kwargs.keys())
EVENT_REWARD_SPECIALIZATIONS = {
    0: get_atapon_evt_rewards,
    # 1: get_caravan_evt_rewards,  # Doesn't seem to exist.
    2: get_groove_evt_rewards,
    3: get_party_evt_rewards,
    4: get_parade_evt_rewards,
    # 5: get_bus_evt_rewards,  # another really nasty one
    6: get_carnival_evt_rewards,
}
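# A quick illustration (the value is invented) of the htype/internal_id
# helpers defined earlier: a packed ID splits into a 4-bit type tag and a
# 28-bit bare ID.
#
#   >>> htype(0x30001F40)
#   3
#   >>> internal_id(0x30001F40)
#   8000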
import json
import locale
import sqlite3
from collections import namedtuple
from datetime import datetime

from pytz import timezone, utc

import csvloader
import models
from starlight import JST, private_data_path

locale.setlocale(locale.LC_ALL, "en_US.UTF-8")

overrides = csvloader.load_keyed_db_file(
    private_data_path("gacha_availability_overrides.csv"))

gacha_stub_t = namedtuple(
    "gacha_stub_t", ("id", "name", "start_date", "end_date", "type", "subtype"))


def gacha_ids(f):
    gachas = []
    a = sqlite3.connect(f)
    for id, n, ss, es, t, t2 in a.execute(
            "SELECT id, name, start_date, end_date, type, type_detail "
            "FROM gacha_data WHERE type = 3 AND type_detail = 1"):
        ss, es = JST(ss), JST(es)
        gachas.append(gacha_stub_t(id, n, ss, es, t, t2))
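# Hypothetical usage sketch: the excerpt is cut off before gacha_ids
# returns, but assuming it ends with "return gachas", it maps a master
# database path (the path below is invented) to stubs for every normal
# (type 3 / type_detail 1) gacha, with start/end dates localized to JST:
#
#   for stub in gacha_ids("transient/master.db"):
#       print(stub.id, stub.name, stub.start_date, "->", stub.end_date)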
def sieve_diff_contents(de):
    ret = {"event": [], "ssr": [], "sr": [], "r": [], "n": []}

    for card_id in filter(lambda x: starlight.card_db[x].evolution_id, de["cids"]):
        if card_id in event_cards:
            ret["event"].append(card_id)
        else:
            key = ["n", "n", "r", "r", "sr", "sr", "ssr", "ssr"][
                starlight.card_db[card_id].rarity - 1]
            ret[key].append(card_id)

    return {"date": de["date"], "cids": ret}


event_cards = [
    x.reward_id for x in starlight.cached_db(
        starlight.ark_data_path("event_available.csv"))
]

HISTORY = [
    sieve_diff_contents(x) for x in reversed(
        starlight.jsonl(starlight.private_data_path("history.json")))
]


@route(r"/")
class Home(tornado.web.RequestHandler):
    # note: gachas with limited cards
    gachas = list(
        filter(
            lambda x: sum(y.limited_flag for y in x.clist),
            starlight.cached_db(
                starlight.ark_data_path("gacha_data.csv"),
                # gacha_available_t -> reward_id...
                clist=lambda obj: list(
                    filter(
                        lambda x: obj.id == x.gacha_id and x.limited_flag,
                        starlight.cached_db(
                            starlight.ark_data_path("gacha_available.csv")))),
                # int
                end_t=lambda obj: timegm(starlight.JST(obj.end_date).timetuple()))))

    def get(self):
        eda = starlight.cached_db(starlight.ark_data_path("event_data.csv"))
        now = pytz.utc.localize(datetime.utcnow())
        if now.day == 29 and now.month == 2: