def run():
    """Build the background data files and report what changed."""
    backgrounds = parse(get_latest_backgrounds())
    backgrounds = srdfilter(backgrounds)
    dump(backgrounds, 'backgrounds.json')
    dump(srdonly(backgrounds), 'srd-backgrounds.json')
    diff('srd-backgrounds.json')
def run():
    """Build the feat data file and report what changed."""
    feats = prerender(get_latest_feats())
    feats = srdfilter(feats)
    feats = fix_dupes(feats, SOURCE_HIERARCHY, True)
    dump(feats, 'feats.json')
    diff('feats.json')
def run():
    """Build the spell data files: full dump, automation-only, and site templates."""
    spells = parse(get_spells())
    dump(spells, 'spells.json')
    diff('spells.json')
    dump(get_auto_only(spells), 'spellauto.json')
    dump(site_parse(spells), 'template-spells.json')
def run():
    """Build the race data files and report what changed."""
    races = split_subraces(get_races_from_web())
    races = explicit_sources(races, EXPLICIT_SOURCES)
    races = fix_dupes(races, SOURCE_HIERARCHY)
    races = remove_ignored(races, IGNORED_SOURCES)
    races = srdfilter(races)
    dump(races, 'races.json')
    dump(srdonly(races), 'srd-races.json')
    diff('srd-races.json')
def run():
    """Build the class and class-feature data files and report what changed."""
    classes = filter_ignored(get_classes_from_web())
    classes = srdfilter(classes)
    classes = recursive_tag(classes)
    classes = fix_subclass_dupes(classes)
    # Class features are derived from the class data, then invocations are appended.
    features = parse_classfeats(classes)
    features.extend(parse_invocations())
    dump(classes, 'classes.json')
    dump(features, 'classfeats.json')
    diff('classes.json')
    diff('classfeats.json')
def run():
    """Build the item data files, folding objects into the item list."""
    items = variant_inheritance(moneyfilter(get_latest_items()))
    # Objects get their actions attached, then ride along in the item list.
    items.extend(object_actions(get_objects()))
    items = srdfilter(items)
    items = prerender(items)
    rendered = site_render(items)
    dump(items, 'items.json')
    diff('items.json')
    dump(rendered, 'template-items.json')
def run():
    """Build spell data files: full, SRD-only, automation-only, and site templates."""
    spells = srdfilter(parse(get_spells()))
    dump(spells, 'spells.json')
    srd_spells = ensure_ml_order(srdonly(spells), True)
    dump(srd_spells, 'srd-spells.json')
    diff('srd-spells.json')
    dump(get_auto_only(spells), 'spellauto.json')
    dump(site_parse(spells), 'template-spells.json')
"enter/37558/09:11:01 pm--53", "enter/34551/09:13:51 pm--59", "enter/30306/08:10:01 pm--31", "enter/37558/08:03:47 pm--25", "enter/30306/08:10:11 pm--32", "enter/37558/09:32:58 pm--71", "enter/38488/08:42:58 pm--44", "enter/37666/11:06:06 pm--105", "enter/20324/08:15:29 pm--37", "enter/37558/08:38:04 pm--42", "enter/37558/09:13:47 pm--58", "enter/16206/09:30:15 pm--70", "enter/37760/09:49:17 pm--86", "enter/36804/07:41:30 pm--23", "enter/34551/09:14:45 pm--62", "enter/36533/09:12:44 pm--55", "enter/36533/09:37:11 pm--75", "enter/38259/07:31:33 pm--16", "enter/59/18:12--6", "enter/34551/09:15:51 pm--64", "enter/35345/10:05:23 pm--90", "enter/8215/08:43:43 pm--47", "enter/36436/09:07:24 pm--52", "enter/37666/11:05:35 pm--104", "enter/59/07:38:05 pm--21", "enter/38508/09:21:30 pm--66", "enter/36804/09:34:47 pm--72", "enter/37666/11:03:59 pm--101", "enter/30306/08:09:49 pm--30", "enter/59/07:06:58 pm--12", "enter/37760/09:49:35 pm--88", "enter/20324/08:14:59 pm--36", "enter/37666/11:04:07 pm--102", "enter/38259/07:32:09 pm--17", "enter/7/07:07:59 pm--13", "enter/28733/10:17:18 pm--94", "enter/35345/10:05:51 pm--91", "enter/59/07:36:49 pm--19", "enter/38512/09:26:31 pm--67", "enter/35086/10:12:37 pm--93", "enter/10536/10:35:04 pm--95", "enter/38512/09:28:11 pm--69", "enter/38259/19:12--7", "enter/30306/08:09:39 pm--29", "enter/37760/09:48:00 pm--80", "enter/8215/08:43:04 pm--45", "enter/36533/09:36:52 pm--74", "enter/38512/09:28:08 pm--68", "enter/38259/19:12--8", "enter/35568/09:48:45 pm--83", "enter/38259/07:32:30 pm--18", "enter/36533/08:21:12 pm--39", "enter/59/18:12--1", "enter/37760/09:48:26 pm--82" ] list_result = diff(list_20191225, list_20191226) print(len(list_20191225), len(list_20191226), len(list_result))
metadata = Path(args.metadata) videos_dir = Path(args.videos) output_dir = Path(args.output) index = Path(args.index) ############################################ # RECUPERA ESTADO ONDE PAROU # ############################################ if metadata.is_file(): full_df = pd.read_csv(metadata, sep=',') else: print(f'[!] ==> Arquivo de treino nao encontrado') sys.exit(1) if index.is_file(): index_df = pd.read_csv(index, sep=',', names=['video', 'utterance']) df = diff(full_df, index_df, ['video', 'utterance']) print(f'[!] ==> Estado recuperado com sucesso') else: df = full_df print(f'[!] ==> Iniciando processamento do zero') del full_df, args, parser, metadata ############################################ with open(index, 'a') as index_file: index_writer = csv.writer(index_file) for i, row in df.iterrows(): utterance = videos_dir / Path(row[3]) / Path('video') / Path(row[4]) out_dir = output_dir / Path(row[3]) / Path('video') / Path(row[4]).with_suffix('') if utterance.is_file(): mkdir(out_dir) net = load_yolonet()