def create_default_files(cls) -> bool:
    """Deploy the sample asset/source files and the template files into the project.

    Sample data and sample sources are only written when the target file does
    not yet exist; template files are always (re)written. Returns True when
    every file is in place, False on the first failed write.
    """
    def _deploy(src_dir, dst_dir, fnames, ext, skip_existing: bool) -> bool:
        # Copy each named file from src_dir into dst_dir. When `ext` is given,
        # it is appended to the bare name before lookup. Returns False on the
        # first write failure.
        for fname in fnames:
            _fname = add_extention(fname, ext) if ext else fname
            data = read_file(os.path.join(src_dir, _fname))
            path = os.path.join(dst_dir, _fname)
            if skip_existing and is_exists_path(path):
                logger.debug(
                    msg.PROC_MESSAGE.format(proc=f"Already exists {path}"))
                continue
            if write_file(path, data):
                logger.debug(msg.PROC_MESSAGE.format(proc=f"Create {path}"))
            else:
                logger.warning(
                    msg.ERR_FAIL_CANNOT_WRITE_DATA.format(data=f"{path}"))
                return False
        return True

    dir_asset = os.path.join(DIR_PROJECT, DEFAULT_ASSET_DIR)
    dir_src = os.path.join(DIR_PROJECT, DEFAULT_SRC_DIR)
    dir_temp = os.path.join(DIR_PROJECT, DEFAULT_TEMP_DIR)

    # NOTE: temp files are always overwritten (no exists-skip), matching the
    # original behavior — presumably templates must stay up to date.
    return (_deploy(DIR_EXAMPLE, dir_asset, SAMPLE_DATA, EXT_YAML, True)
            and _deploy(DIR_EXAMPLE, dir_src, SAMPLE_SRC, EXT_MARKDOWN, True)
            and _deploy(DIR_TEMP, dir_temp, TEMP_FILES, None, False))
def get_assets_db() -> AssetsDB:
    """Build the assets DB from every YAML file under the asset directory.

    Files that vanish between listing and reading are skipped with a warning;
    unparsable files simply contribute nothing.
    """
    _PROC = f"{PROC}: get assets db"
    logger.debug(msg.PROC_START.format(proc=_PROC))

    db = AssetsDB()
    for path in get_filepaths_in(PM.get_asset_dir_path(), EXT_YAML, True):
        if not is_exists_path(path):
            logger.warning(
                msg.ERR_FAIL_MISSING_DATA.format(
                    data=f"asset data of {path}: {PROC}"))
            continue
        # NOTE: file validate?
        obj = asset_object_from(read_file(path))
        if obj:
            assert isinstance(obj, SObject)
            db.add(obj.tag, obj)
            logger.debug(
                msg.PROC_MESSAGE.format(proc=f"Add '{obj.tag}' to asset db"))

    logger.debug(msg.PROC_SUCCESS.format(proc=_PROC))
    return db
def nametags_from(assets: AssetsDB) -> dict:
    """Collect name-tag conversion tables from every asset in *assets*.

    Dispatches each asset to the matching Converter routine; failed
    conversions are logged as warnings but do not abort the scan. Returns
    the accumulated tag dict, or an empty dict when the config file cannot
    be loaded.
    """
    assert isinstance(assets, AssetsDB)
    logger.debug(msg.PROC_START.format(proc=PROC))

    config = yaml.safe_load(read_file(FILE_CONFIG))
    if not config:
        logger.error(msg.ERR_FAIL_MISSING_DATA.format(data=f"config file: {PROC}"))
        return {}
    mob_num = config[ELM_CONFIG][ELM_MOBS]

    def _warn(label: str) -> None:
        # Uniform "invalid data" warning for a failed name conversion.
        logger.warning(
            msg.ERR_FAIL_INVALID_DATA.format(data=f"{label}: {PROC}"))

    names = {}
    for tag, asset in assets.data.items():
        assert isinstance(tag, str)
        assert isinstance(asset, SObject)
        if isinstance(asset, Person):
            if not Converter.person_names_of(names, asset):
                _warn(f"person '{asset.tag}' name")
        elif isinstance(asset, Stage):
            if not Converter.stage_names_of(names, asset):
                _warn(f"stage '{asset.tag}' name")
        elif isinstance(asset, Item):
            if not Converter.item_name_of(names, asset):
                _warn(f"item '{asset.tag}' name")
        elif isinstance(asset, NameTag):
            if NameTagType.MOB is asset.type:
                if not Converter.mob_name_of(names, asset, mob_num):
                    _warn("mob names")
            elif NameTagType.TIME is asset.type:
                if not Converter.time_name_of(names, asset):
                    _warn("time names")
            elif NameTagType.WORD is asset.type:
                if not Converter.word_name_of(names, asset):
                    _warn("word names")
            # other NameTag types are intentionally ignored
        # Rubi and any other asset kind are intentionally skipped.

    logger.debug(msg.PROC_SUCCESS.format(proc=PROC))
    return names
def create_base_project_files(cls) -> bool:
    """Create each base project file that does not already exist.

    The project file's content is passed through
    ``cls._replace_project_file_data`` before writing. Returns False on the
    first failed write, True otherwise.
    """
    for path in BASE_FILES:
        if is_exists_path(path):
            logger.debug(
                msg.PROC_MESSAGE.format(proc=f"Already exists {path}"))
            continue
        source = os.path.join(DIR_DATA, basename_of(path, False))
        content = read_file(source)
        if path == FILE_PROJECT:
            # The project file carries placeholders that must be filled in.
            content = cls._replace_project_file_data(content)
        if not write_file(path, content):
            logger.warning(
                msg.ERR_FAIL_CANNOT_WRITE_DATA.format(data=f"{path}"))
            return False
        logger.debug(msg.PROC_MESSAGE.format(proc=f"Create {path}"))
    return True
def get_srcs_db() -> SrcsDB:
    """Build the sources DB from every markdown file under the src directory.

    Duplicate tags are skipped with a warning; only the first occurrence of
    a tag is registered.
    """
    _PROC = f"{PROC}: get sources db"
    logger.debug(msg.PROC_START.format(proc=_PROC))

    db = SrcsDB()
    # Tags already registered. A set gives O(1) duplicate checks
    # (was a list, O(n) membership test per raw source).
    seen_tags = set()
    paths = get_filepaths_in(PM.get_src_dir_path(), EXT_MARKDOWN, True)
    for path in paths:
        if not is_exists_path(path):
            logger.warning(
                msg.ERR_FAIL_MISSING_DATA.format(
                    data=f"source data of {path}: {PROC}"))
            continue
        data = read_file(path)
        raws = assertion.is_list(raw_src_objects_from(data))
        for raw in raws:
            if not raw:
                continue
            assert isinstance(raw, RawSrc)
            if raw.tag in seen_tags:
                logger.warning(
                    msg.ERR_FAIL_DUPLICATED_DATA_WITH_DATA.format(
                        data=f"tag name: {_PROC}"), raw.tag)
                continue
            db.add(raw.tag, raw)
            seen_tags.add(raw.tag)
            logger.debug(
                msg.PROC_MESSAGE.format(
                    proc=f"Add '{raw.tag}' to srcs db"))

    logger.debug(msg.PROC_SUCCESS.format(proc=_PROC))
    return db
def create_common_files(cls) -> bool:
    """Deploy the common YAML files into the project's common asset directory.

    Existing files are left untouched. Returns False on the first failed
    write, True otherwise.
    """
    common_dir = os.path.join(DIR_PROJECT, DEFAULT_ASSET_DIR, DEFAULT_COMMON)
    for fname in COMMON_FILES:
        named = add_extention(fname, EXT_YAML)
        content = read_file(os.path.join(DIR_COMMON, named))
        target = os.path.join(common_dir, named)
        if is_exists_path(target):
            logger.debug(
                msg.PROC_MESSAGE.format(proc=f"Already exists {target}"))
            continue
        if not write_file(target, content):
            logger.warning(
                msg.ERR_FAIL_CANNOT_WRITE_DATA.format(data=f"{target}"))
            return False
        logger.debug(msg.PROC_MESSAGE.format(proc=f"Create {target}"))
    return True
def _get_columns_and_rows() -> tuple:
    """Return the ``(columns, rows)`` pair from the project config file."""
    config = yaml.safe_load(read_file(FILE_CONFIG))
    section = config['config']
    return (section['columns'], section['rows'])
def compile_codes(scenes: ScenesDB, assets: AssetsDB) -> StoryData:
    """Compile the scene DB into a StoryData.

    Resolves the entry scene, gathers name tags and time clocks, then runs
    the chain of ``apply_*`` transformation passes in order. Logs an error
    and returns None as soon as any stage yields no data.
    """
    assert isinstance(scenes, ScenesDB)
    assert isinstance(assets, AssetsDB)

    logger.debug(msg.PROC_START.format(proc=PROC))

    def _missing(label: str) -> None:
        # Uniform error log for a stage that produced no data; the caller
        # returns None immediately after.
        logger.error(
            msg.ERR_FAIL_MISSING_DATA.format(data=f"{label}: {PROC}"))

    config = yaml.safe_load(read_file(FILE_CONFIG))
    if not config:
        _missing("config file")
        return None

    entry = config[ELM_CONFIG][ELM_ENTRY]
    if not scenes.has(entry):
        logger.error(
            msg.ERR_FAIL_MISSING_DATA_WITH_DATA.format(
                data=f"entry point: {PROC}"), entry)
        return None

    data = call_scene(0, entry, scenes)
    if not data:
        _missing("story data")
        return None

    tags = nametags_from(assets)
    if not tags:
        _missing("name tags")
        return None
    tags_sorted = dict_sorted(tags)  # NOTE: current tags unused

    timeclocks = timeclocks_from(assets)
    if not timeclocks:
        _missing("time clocks tags")
        return None

    # Transformation pipeline: each pass consumes the previous result.
    updated = apply_alias(data)
    if not updated:
        _missing("apply alias data")
        return None

    updated = apply_scene_time_to_clock(updated, timeclocks)
    if not updated:
        _missing("apply time clock data")
        return None

    updated = apply_scene_info_same(updated)
    if not updated:
        _missing("apply same info data")
        return None

    updated = apply_scene_action_same(updated)
    if not updated:
        _missing("apply same act data")
        return None

    # If date and year are relative ("next"), refine them here.
    updated = apply_scene_info_next(updated)
    if not updated:
        _missing("apply next date time")
        return None

    # Apply instructions embedded in the story data.
    updated = apply_instructions(updated)
    if not updated:
        _missing("apply instruction data")
        return None

    # tag convert
    # TODO: should a tag-conversion pass run once here, or not?
    logger.debug(
        msg.MSG_UNIMPLEMENT_PROC.format(proc=f"tag convert phase: {PROC}"))

    logger.debug(msg.PROC_SUCCESS.format(proc=PROC))
    return StoryData(updated)