def scene_by_name(fragment):
    """Search Traxxx for scenes matching *fragment*.

    Returns a list of Stash scene-search dicts, or an empty list (with a
    warning logged) when Traxxx returns no results.
    """
    matches = search_traxxx_for_scene(fragment)
    if not matches:
        log.warning("No scene results from Traxxx")
        return []
    return [traxxx.parse_to_stash_scene_search(match) for match in matches]
def scraping_json(api_json, url=None):
    """Convert a Gamma/Algolia API scene JSON object into a Stash scrape dict.

    Args:
        api_json: decoded JSON dict for one scene from the API.
        url: optional fallback scene URL used when one cannot be built
            from the fields in api_json.

    Returns:
        dict with Stash scraper fields: title, date, details, studio,
        performers, tags, and (when resolvable) image and url.
    """
    scrape = {}
    # Title
    if api_json.get('title'):
        scrape['title'] = api_json['title'].strip()
    # Date
    scrape['date'] = api_json.get('release_date')
    # Details: collapse the various <br> spellings into newlines.
    # Guarded: re.sub() raises TypeError when the description is None.
    description = api_json.get('description')
    if description:
        scrape['details'] = re.sub(r'</br>|<br\s/>|<br>|<br/>', '\n', description)
    else:
        scrape['details'] = description
    # Studio
    scrape['studio'] = {}
    if api_json.get('serie_name'):
        scrape['studio']['name'] = api_json.get('serie_name')
    log.debug(
        "[STUDIO] {} - {} - {} - {}".format(
            api_json.get('serie_name'), api_json.get('network_name'),
            api_json.get('mainChannelName'), api_json.get('sitename_pretty')))
    # Performers: only female performers are kept (unchanged behavior).
    # `or []` guards against a missing/None 'actors' key, which previously
    # crashed the loop.
    perf = []
    for actor in api_json.get('actors') or []:
        if actor.get('gender') == "female":
            perf.append({
                "name": (actor.get('name') or "").strip(),
                "gender": actor.get('gender')
            })
    scrape['performers'] = perf
    # Tags: title-case each tag name. The capitalized name used to be
    # computed and then discarded (the raw name was appended) — fixed here
    # so the computation actually takes effect.
    list_tag = []
    for category in api_json.get('categories') or []:
        raw_name = category.get('name')
        if raw_name is None:
            continue
        tag_name = " ".join(word.capitalize() for word in raw_name.split(" "))
        if tag_name:
            list_tag.append({"name": tag_name})
    if FIXED_TAGS:
        list_tag.append({"name": FIXED_TAGS})
    scrape['tags'] = list_tag
    # Image: prefer the NSFW picture set, fall back to SFW.
    # Narrowed from bare except: only the lookup/iteration failures expected
    # when a picture set is absent or empty.
    for rating in ('nsfw', 'sfw'):
        try:
            scrape['image'] = ('https://images03-fame.gammacdn.com/movies'
                               + next(iter(api_json['pictures'][rating]['top'].values())))
            break
        except (KeyError, TypeError, AttributeError, StopIteration):
            continue
    else:
        log.warning("Can't locate image.")
    # URL: build the canonical site URL; fall back to the caller's url.
    try:
        scrape['url'] = 'https://{}.com/en/video/{}/{}/{}'.format(
            api_json["sitename"], api_json["sitename"],
            api_json["url_title"], api_json["clip_id"])
    except KeyError:
        if url:
            scrape['url'] = url
    return scrape
def performer_lookup(fragment):
    """Search Traxxx by the fragment's performer name.

    Returns a list of Stash performer-search dicts, or an empty list
    (with a warning logged) when nothing matches.
    """
    hits = traxxx.search_performers(fragment["name"])
    if not hits:
        log.warning("No performer results from Traxxx")
        return []
    return [traxxx.parse_to_stash_performer_search(hit) for hit in hits]
def scene_query_fragment(fragment):
    """Resolve the fragment's Traxxx URL to a Stash scene.

    Parses the numeric scene ID out of a traxxx.me scene URL, fetches that
    scene from Traxxx, and returns it in Stash format. Returns None (after
    logging a warning) when the URL doesn't contain a scene ID.
    """
    traxxx_url = fragment.get("url", "")
    match = re.search(r'traxxx.me/scene/(\d+)/', traxxx_url)
    if match is None:
        log.warning(f'could not parse scene ID from URL: {traxxx_url}')
        return
    fetched = traxxx.get_scene(match.group(1))
    return traxxx.parse_to_stash_scene(fetched)
def sendRequest(url, head, json=""):
    """POST *json* to *url* with the given headers.

    Returns the requests.Response on HTTP 200 with a non-empty body;
    otherwise logs a warning and returns None.
    """
    log.debug("Request URL: {}".format(url))
    response = requests.post(url, headers=head, json=json, timeout=10)
    #log.debug("Returned URL: {}".format(response.url))
    if response.status_code == 200 and response.content:
        return response
    log.warning("[REQUEST] Error, Status Code: {}".format(response.status_code))
    #print(response.text, file=open("algolia_request.html", "w", encoding='utf-8'))
    return None
def check_db(DB_PATH, SCENE_ID):
    """Read size/duration/height for a scene directly from Stash's SQLite DB.

    Fallback path used when the GraphQL API is unavailable. The database is
    opened read-only (uri mode=ro), so it can never be modified here.

    Args:
        DB_PATH: filesystem path to Stash's sqlite database file.
        SCENE_ID: id of the row to read from the `scenes` table.

    Returns:
        {"size": int, "duration": int, "height": str} on success, or None
        when the database can't be opened or the scene row doesn't exist.
        (Previously a (None, None, None) tuple was returned on connection
        failure — inconsistent with the dict success value and with the
        caller, which expects a single object.)
    """
    try:
        sqliteConnection = sqlite3.connect("file:" + DB_PATH + "?mode=ro", uri=True)
        log.debug("Connected to SQLite database")
    except sqlite3.Error:
        log.warning("Fail to connect to the database")
        return None
    try:
        cursor = sqliteConnection.cursor()
        cursor.execute("SELECT size,duration,height from scenes WHERE id=?;", [SCENE_ID])
        record = cursor.fetchone()
        cursor.close()
    finally:
        # Always release the connection, even if the query raises.
        sqliteConnection.close()
    if record is None:
        # Previously record[0] raised IndexError when the scene was missing.
        log.warning("Scene {} not found in the database".format(SCENE_ID))
        return None
    return {
        "size": int(record[0]),
        "duration": int(record[1]),
        "height": str(record[2]),
    }
# --- Locate the Stash database and load the current scene's file info. ---
# DB_PATH may already be set; otherwise fall back to pulling the
# `database:` line out of Stash's config.yml.
if (CONFIG_PATH and DB_PATH is None):
    # getting your database from the config.yml
    if (os.path.isfile(CONFIG_PATH)):
        with open(CONFIG_PATH) as f:
            for line in f:
                if "database: " in line:
                    # Naive YAML parse: strip the key prefix and the newline.
                    DB_PATH = line.replace("database: ", "").rstrip('\n')
                    break
        log.debug("Database Path: {}".format(DB_PATH))
if DB_PATH:
    if SCENE_ID:
        # Get data by GraphQL
        database_dict = graphql.getScene(SCENE_ID)
        if database_dict is None:
            # Get data by SQlite
            log.warning("GraphQL request failed, accessing database directly...")
            database_dict = check_db(DB_PATH, SCENE_ID)
        else:
            # GraphQL returns a whole scene object; only its file block is used.
            database_dict = database_dict["file"]
        log.debug("[DATABASE] Info: {}".format(database_dict))
    else:
        # URL-based scrape: no scene id, so no local file info to compare.
        database_dict = None
        log.debug("URL scraping... Ignoring database...")
else:
    database_dict = None
    log.warning("Database path missing.")
# Extract things
# Placeholders populated later from the scene URL (not visible in this chunk).
url_title = None
url_id = None
url_domain = None
# --- Gallery scrape: look for a ComicInfo-style XML next to the gallery. ---
g_id = fragment.get("id")
if not g_id:
    log.error(f"No ID found")
    sys.exit(1)
gallery = graphql.getGalleryPath(g_id)
if gallery:
    gallery_path = gallery.get("path")
    if gallery_path:
        p = pathlib.Path(gallery_path)
        # Fallback result: keep the current title if no XML can be read.
        res = {"title": fragment["title"]}
        # Determine if loose file format or archive such as .cbz or .zip
        if "cbz" in gallery_path or "zip" in gallery_path:
            # Look for filename.xml where filename.(cbz|zip) is the gallery
            f = p.with_suffix('.xml')
            log.debug(f"Single File Format, using: {f}")
        else:
            # Use loose files format
            # Look for ComicInfo.xml in the gallery's folder
            f = pathlib.Path(p.resolve(),"ComicInfo.xml")
            log.debug(f"Folder format, using:{f}")
        if f.is_file():
            res = query_xml(f, fragment["title"])
        else:
            log.warning(f'No xml files found for the gallery: {p}')
# NOTE(review): `res` is only bound when a gallery with a path was resolved
# above — a missing gallery/path would raise NameError here. Confirm the
# caller always supplies a gallery that has a path.
print(json.dumps(res))
exit(0)