Code example #1
def find_tag(name, mergename):
    # match tags whose name equals `name`, or whose aliases contain it
    tag_filter = {
        "name": {"value": name, "modifier": "EQUALS"},
        "OR": {"aliases": {"value": name, "modifier": "EQUALS"}},
    }
    query = """
     query searchingforTag($tag_filter : TagFilterType) {
           findTags(tag_filter: $tag_filter){
                count
                tags {
                  id
                  name
             }
           }
       }
    """
    variables = {"tag_filter": tag_filter}
    #log.debug(variables)
    result = call_graphql(query, variables)
    #log.debug(result)
    if result["findTags"]["count"]:
        log.info("You'll need to merge this - Desired main tag " + mergename +
                 " <- desired aliased tag:  " + name)
        return result["findTags"]["count"]
    log.debug("No tag found with name: " + name)
    return 0
Code example #2
def searchName(scene_name):
    TMDBsearch = Search()
    search_results = TMDBsearch.movies({
        "query": scene_name,
        "include_adult": "true"
    })
    result_search = []
    for result in search_results:
        search = {}
        search['title'] = result.get('title')
        search['date'] = result.get('release_date')
        search['details'] = result.get('overview')
        # poster_path can be None, which would make the concatenation fail
        if result.get('poster_path'):
            search['image'] = base_url + poster_size + result.get('poster_path')
        search['url'] = "https://themoviedb.org/movie/" + str(result.get('id'))
        search['remote_site_id'] = str(result.get('id'))
        result_search.append(search)
    if not result_search:
        log.debug("[ERROR] TMDB API Search Error. No movies found")
    print(json.dumps(result_search))
    sys.exit(0)
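
A minimal invocation sketch for searchName, assuming the scraper reads a name fragment from stdin the way the later examples do (FRAGMENT and the sys/json imports are taken from code example #27):

FRAGMENT = json.loads(sys.stdin.read())
searchName(FRAGMENT.get("name"))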
Code example #3
def addFranchise(query, results):
    cleanedQuery = requests.utils.quote(query)
    url = f"https://www.animecharactersdatabase.com/api_series_characters.php?character_q={cleanedQuery}"
    data = json.loads(scrapeUrlToString(url))
    count1 = 0
    count2 = 0
    for result in results:
        try:
            # Try to find the franchise in API search results.
            # These results are ordered by alphabet and limited to 100,
            # so short queries might not include the correct result.
            # The API query also does not seem to support any Kanji.
            franchise = next(e["anime_name"] for e in data["search_results"]
                             if str(e["id"]) == result["id"])
            count1 += 1
        except (StopIteration, KeyError):
            # Use separate API calls as a backup.
            # This might get you banned, since the API is rate limited.
            franchise = apiGetCharacter(result["id"])["origin"]
            count2 += 1
        # Append franchise to character name for easier differentiation.
        result["name"] = f"{result['name']} ({franchise})"
        result.pop("id")
    log.debug(f"scraped {count1} franchises by single API call")
    log.debug(f"scraped {count2} franchises by separate API calls")
    return results
Code example #4
def update_studio(studio, studio_data):
    studioinfo = {}
    studioinfo["id"] = studio["id"]
    #log.debug(studio)
    #log.debug(studio_data)
    try:
        studioinfo["image"] = studio_data["images"][0]["url"]
    except (KeyError, IndexError):
        log.debug("No image on " + studio_data["name"])
    if studio["name"] != studio_data["name"]:
        studioinfo["name"] = studio_data["name"]
        if len(studio["aliases"]):
            studioinfo["aliases"] = studio["aliases"] + [studio["name"]]
        else:
            studioinfo["aliases"] = [studio["name"]]

    query = """
                        mutation studioimageadd($input: StudioUpdateInput!) {
                           studioUpdate(input: $input) {
                             name
                             image_path
                           }
                        }
                       """
    variables = {"input": studioinfo}
    #log.debug(variables)
    result = call_graphql(query, variables)
    if result:
        #log.debug(result)
        if result["studioUpdate"] is None:
            log.info("Failed to update, check for an existing Studio named " +
                     studio_data["name"])
    return result
Code example #5
def scraping_json(api_json, url=None):
    scrape = {}
    # Title
    if api_json.get('title'):
        scrape['title'] = api_json['title'].strip()
    # Date
    scrape['date'] = api_json.get('release_date')
    # Details
    if api_json.get('description'):
        scrape['details'] = re.sub(r'</?br\s*/?>', '\n', api_json['description'])

    # Studio
    scrape['studio'] = {}
    if api_json.get('serie_name'):
        scrape['studio']['name'] = api_json.get('serie_name')

    log.debug(
        "[STUDIO] {} - {} - {} - {}".format(
            api_json.get('serie_name'), api_json.get('network_name'),
            api_json.get('mainChannelName'), api_json.get('sitename_pretty')))
    # Performer
    perf = []
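    # only female performers are kept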
    for x in api_json.get('actors') or []:
        if x.get('gender') == "female":
            perf.append({
                "name": x.get('name').strip(),
                "gender": x.get('gender')
            })
    scrape['performers'] = perf

    # Tags
    list_tag = []
    for x in api_json.get('categories') or []:
        tag_name = x.get('name')
        if tag_name is None:
            continue
        # capitalize each word of the tag name
        tag_name = " ".join(w.capitalize() for w in tag_name.split(" "))
        if tag_name:
            list_tag.append({"name": tag_name})
    if FIXED_TAGS:
        list_tag.append({"name": FIXED_TAGS})
    scrape['tags'] = list_tag

    # Image
    try:
        scrape['image'] = 'https://images03-fame.gammacdn.com/movies' + next(iter(api_json['pictures']['nsfw']['top'].values()))
    except (KeyError, StopIteration):
        try:
            scrape['image'] = 'https://images03-fame.gammacdn.com/movies' + next(iter(api_json['pictures']['sfw']['top'].values()))
        except (KeyError, StopIteration):
            log.warning("Can't locate image.")
    # URL
    try:
        scrape['url'] = 'https://{}.com/en/video/{}/{}/{}'.format(
            api_json["sitename"], api_json["sitename"], api_json["url_title"],
            api_json["clip_id"])
    except KeyError:
        if url:
            scrape['url'] = url
    #debug("{}".format(scrape))
    return scrape
Code example #6
def main():
    stdin = sys.stdin.read()
    log.debug(stdin)
    fragment = json.loads(stdin)

    if not fragment.get('url'):
        log.error('No URL entered.')
        sys.exit(1)
    url = fragment['url'].strip()

    site, studio, el_id, slug = get_from_url(url)
    if site is None:
        log.error('The URL could not be parsed')
        sys.exit(1)

    response, err = make_request(url, f"https://{site}")
    if err is not None:
        log.error(f"Could not fetch page HTML: {err}")
        sys.exit(1)

    j = fetch_page_json(response)
    if j is None:
        log.error('Could not find JSON on page')
        sys.exit(1)

    if len(sys.argv) < 2 or sys.argv[1] == "scene":
        scrape_scene(page_json=j, studio=studio)
    elif sys.argv[1] == "performer":
        scrape_performer(j)
Code example #7
def sendRequest(url, head, json=""):
    log.debug("Request URL: {}".format(url))
    response = requests.post(url, headers=head, json=json, timeout=10)
    #log.debug("Returned URL: {}".format(response.url))
    if response.content and response.status_code == 200:
        return response
    else:
        log.warning("[REQUEST] Error, Status Code: {}".format(response.status_code))
        #print(response.text, file=open("algolia_request.html", "w", encoding='utf-8'))
    return None
Code example #8
def main():
    stdin = sys.stdin.read()
    log.debug(stdin)
    fragment = json.loads(stdin)

    if not fragment.get('url'):
        log.error('No URL entered.')
        sys.exit(1)
    url = fragment['url'].strip()
    site, studio, sid, slug = get_from_url(url)
    if site is None:
        log.error('The URL could not be parsed')
        sys.exit(1)
    response, err = make_request(url, f"https://{site}")
    if err is not None:
        log.error(f'Could not fetch page HTML: {err}')
        sys.exit(1)
    j = fetch_page_json(response)
    if j is None:
        log.error('Could not find JSON on page')
        sys.exit(1)
    if 'video' not in j['data']:
        log.error('Could not locate scene within JSON')
        sys.exit(1)

    scene = j["data"]["video"]

    if scene.get('id'):
        if str(scene['id']) != sid:
            log.error('Wrong scene within JSON')
            sys.exit(1)
        log.info(f"Scene {sid} found")
    scrape = {}
    if scene.get('title'):
        scrape['title'] = scene['title']
    if scene.get('release_date'):
        scrape['date'] = scene['release_date'][:10]
    if scene.get('description'):
        details = BeautifulSoup(scene['description'], "html.parser").get_text()
        scrape['details'] = details
    if scene.get('sites'):
        scene_studio = scene['sites'][0]['name']
        scrape['studio'] = {'name': scene_studio}
    if scene.get('models'):
        models = []
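        # a single "model" entry can contain several performers joined by "&"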
        for m in scene['models']:
            models.extend([x.strip() for x in m['name'].split("&")])
        scrape['performers'] = [{'name': x} for x in models]
    if scene.get('tags'):
        scrape['tags'] = [{'name': x['name']} for x in scene['tags']]
    if j['data'].get('file_poster'):
        scrape['image'] = j['data']['file_poster']
    print(json.dumps(scrape))
Code example #9
    def getScene(self, url: str):
        log.debug(f"Scraping using {self.name} graphql API")
        q = {
            'query': self.getVideoQuery,
            'operationName': "getVideo",
            'variables': {
                "site": self.id,
                "videoSlug": self.getSlug(url)
            }
        }
        r = self.callGraphQL(query=q, referer=url)
        return self.parse_scene(r)
Code example #10
    def __init__(self, fragments=None):
        scheme = "https"
        domain = 'www.traxxx.me'

        if self.port:
            domain = f'{domain}:{self.port}'

        # Stash GraphQL endpoint
        self.url = f'{scheme}://{domain}/graphql'
        log.debug(f"Using GraphQL endpoint at {self.url}")

        # copy to avoid mutating a shared default dict between instances
        self.fragments = dict(fragments or {})
        self.fragments.update(traxxx_gql_fragments)
Code example #11
    def getSearchResult(self, query: str):
        log.debug(f"Searching using {self.name} graphql API")
        q = {
            'query': self.getSearchQuery,
            'operationName': "getSearchResults",
            'variables': {
                "site": self.id,
                "query": query,
                "first": self.search_count
            }
        }
        r = self.callGraphQL(query=q, referer=self.home)
        return self.parse_search(r)
Code example #12
def updateScene_with_gallery(scene_id, gallery_id):
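    # gallery_ids is the full list of galleries to set on the scene (a single gallery here)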
    data = {'id': scene_id, 'gallery_ids': [gallery_id]}
    log.debug("data " + str(data))
    query = """
                mutation SceneUpdate($input : SceneUpdateInput!) {
                  sceneUpdate(input: $input) {
                    id
                    title
                  }
                }
                """
    variables = {"input": data}
    result = call_graphql(query, variables)
    log.debug("graphql_updateGallery callGraphQL result " + str(result))
Code example #13
def find_galleries(scene_id, scene_path):
    ids = []
    directory_path = os.path.dirname(scene_path)
    # only scan the scene's own directory; the break stops os.walk after the top level
    for (cur, dirs, files) in os.walk(directory_path):
        for file in files:
            if file.endswith('.zip'):
                gallery_path = os.path.join(cur, file)
                gallery_id = get_gallery_id_by_path(gallery_path)
                updateScene_with_gallery(scene_id, gallery_id)
                ids.append(gallery_id)
        break
    log.debug("find_galleries ids found " + str(ids))
    return ids
Code example #14
def write_config(date, app_id, api_key):
    log.debug("Writing config!")
    config = ConfigParser()
    config.read(STOCKAGE_FILE_APIKEY)
    try:
        config.get(SITE, 'date')
    except NoSectionError:
        config.add_section(SITE)
    config.set(SITE, 'date', date.strftime("%Y-%m-%d %H:%M:%S.%f"))
    config.set(SITE, 'app_id', app_id)
    config.set(SITE, 'api_key', api_key)
    with open(STOCKAGE_FILE_APIKEY, 'w') as configfile:
        config.write(configfile)
    return
Code example #15
def check_db(DB_PATH, SCENE_ID):
    try:
        sqliteConnection = sqlite3.connect("file:" + DB_PATH + "?mode=ro", uri=True)
        log.debug("Connected to SQLite database")
    except sqlite3.Error:
        log.warning("Failed to connect to the database")
        return None
    cursor = sqliteConnection.cursor()
    cursor.execute("SELECT size,duration,height from scenes WHERE id=?;", [SCENE_ID])
    record = cursor.fetchall()
    database = {}
    database["size"] = int(record[0][0])
    database["duration"] = int(record[0][1])
    database["height"] = str(record[0][2])
    cursor.close()
    sqliteConnection.close()
    return database
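
A minimal usage sketch, assuming DB_PATH and SCENE_ID come from the surrounding scraper (code example #24 consumes the resulting database_dict):

database_dict = check_db(DB_PATH, SCENE_ID)
if database_dict:
    log.debug("duration: {}s, size: {} bytes".format(database_dict["duration"], database_dict["size"]))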
Code example #16
def update_gallery(gallery_input):
    log.debug("gallery input " + str(gallery_input))
    query = """
    mutation GalleryUpdate($input: GalleryUpdateInput!) {
        galleryUpdate(input: $input) {
            id
            title
        }
    }
    """
    variables = {"input": gallery_input}
    result = call_graphql(query, variables)
    if result:
        g_id = result['galleryUpdate'].get('id')
        g_title = result['galleryUpdate'].get('title')
        log.info(f"updated Gallery ({g_id}): {g_title}")
    return result
Code example #17
def check_config(domain, time):
    if os.path.isfile(STOCKAGE_FILE_APIKEY):
        config = ConfigParser()
        config.read(STOCKAGE_FILE_APIKEY)
        try:
            time_past = datetime.datetime.strptime(
                config.get(domain, 'date'), '%Y-%m-%d %H:%M:%S.%f')

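            # reuse the cached key only if it was issued in the same clock hour, less than a day ago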
            if time_past.hour - 1 < time.hour < time_past.hour + 1 and (time - time_past).days == 0:
                log.debug("Using old key")
                application_id = config.get(domain, 'app_id')
                api_key = config.get(domain, 'api_key')
                return application_id, api_key
            else:
                log.info(
                    "Need new api key: [{}|{}|{}]".format(
                        time.hour, time_past.hour, (time - time_past).days))
        except NoSectionError:
            pass
    return None, None
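
A minimal sketch of how check_config pairs with write_config from code example #14; request_new_key is a hypothetical stand-in for the site-specific key fetch:

now = datetime.datetime.now()
app_id, api_key = check_config(SITE, now)
if app_id is None:
    app_id, api_key = request_new_key()  # hypothetical helper
    write_config(now, app_id, api_key)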
Code example #18
def get_gallery_id_by_path(gallery_path):
    log.debug("get_gallery_by_path gallery_path " + str(gallery_path))
    query = """
            query FindGalleries($galleries_filter: GalleryFilterType) {
              findGalleries(gallery_filter: $galleries_filter filter: {per_page: -1}) {
                count
                    galleries {
                        id
                    }
              }
            }
            """
    variables = {
        "galleries_filter": {
            "path": {
                'value': gallery_path,
                "modifier": "INCLUDES_ALL"
            }
        }
    }
    result = call_graphql(query, variables)
    log.debug("get_gallery_by_path callGraphQL result " + str(result))
    galleries = result['findGalleries']['galleries']
    if not galleries:
        return None
    return galleries[0]['id']
Code example #19
def scrape_scene(url):
    query = """query scrapeSceneURL($url: String!) {
                  scrapeSceneURL(url: $url) {
                        title
                        details
                        date
                        image
                        studio {
                            name
                        }
                        tags {
                            name
                        }
                        performers {
                            name
                        }
                    }
                }"""

    variables = {'url': url}
    result = call_graphql(query, variables)
    log.debug(f"result {result}")
    return result["scrapeSceneURL"]
Code example #20
def update_movie(movie_id, movie_url, scene):
    query = """
      query rescrapeMovie($url: String!) {
       scrapeMovieURL (url: $url) {
        name
        duration
        date
        director
        url
        synopsis
        front_image
        back_image
       }
      }
      """
    variables = {"url": movie_url}
    result = call_graphql(query, variables)
    if result:
        newinfo = result['scrapeMovieURL']
        movie_input = newinfo
        movie_input["id"] = movie_id
        if "duration" in newinfo:
            try:
                if ":" in newinfo["duration"]:
                    # the result is in hh:mm:ss format while the schema requires seconds as an int
                    movie_input["duration"] = get_sec(newinfo["duration"])
            except TypeError:
                log.debug("No duration")
        # the query above fails earlier on a studioless entry, so the studio is queried separately
        try:
            query = """
            query rescrapeMovie($url: String!) {
                scrapeMovieURL(url: $url) {
                    studio { stored_id }
                }
            }
            """
            variables = {"url": movie_url}
            resultstudio = call_graphql(query, variables)
            movie_input["studio_id"] = resultstudio['scrapeMovieURL']["studio"]["stored_id"]
        except (KeyError, TypeError):
            log.debug("No Studio")

        variables = {"input": movie_input}
        query = """
        mutation resaveMovie($input: MovieUpdateInput!) {
            movieUpdate(input: $input) {
                id
                name
            }
        }
        """
        result = call_graphql(query, variables)
        log.debug(result)
    return
Code example #21
else:
    log.error('You need to set the URL (e.g. teamskeet.com/movies/*****)')
    sys.exit(1)

if "teamskeet.com/movies/" not in scene_url:
    log.error('The URL is not from a Teamskeet URL (e.g. teamskeet.com/movies/*****)')
    sys.exit(1)

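# the scene ID is the last path segment of the URL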
scene_id = re.sub('.+/', '', scene_url)
if not scene_id:
    log.error("Error with the ID ({})\nAre you sure that the end of your URL is correct ?".format(scene_id))
    sys.exit(1)
use_local = 0
json_file = os.path.join(DIR_JSON, scene_id+".json")
if os.path.isfile(json_file):
    log.debug("Using local JSON...")
    use_local = 1
    with open(json_file, encoding="utf-8") as f:
        scene_api_json = json.load(f)
else:
    log.debug("Asking the API...")
    api_url = f"https://store2.psmcdn.net/ts-elastic-d5cat0jl5o-videoscontent/_doc/{scene_id}"
    headers = {
        'User-Agent': USER_AGENT,
        'Origin': 'https://www.teamskeet.com',
        'Referer': 'https://www.teamskeet.com/'
    }
    scraper = cloudscraper.create_scraper()
    # Send to the API
    r = ""
    try:
Code example #22
    g_id = fragment.get("id")
    if not g_id:
        log.error(f"No ID found")
        sys.exit(1)

    gallery = graphql.getGalleryPath(g_id)
    if gallery:
        gallery_path = gallery.get("path")
        if gallery_path:
            p = pathlib.Path(gallery_path)

            res = {"title": fragment["title"]}
            # determine whether this is a loose-file gallery or an archive such as .cbz or .zip
            if "cbz" in gallery_path or "zip" in gallery_path:
                # look for filename.xml where filename.(cbz|zip) is the gallery
                f = p.with_suffix('.xml')
                log.debug(f"Single file format, using: {f}")
            else:
                # loose files format: look for ComicInfo.xml in the gallery's folder
                f = pathlib.Path(p.resolve(), "ComicInfo.xml")
                log.debug(f"Folder format, using: {f}")

            if f.is_file():
                res = query_xml(f, fragment["title"])
            else:
                log.warning(f'No XML file found for the gallery: {p}')

            print(json.dumps(res))
            exit(0)
Code example #23
    def getScene(self, url: str):
        log.debug(f"Scraping using {self.name} graphql API")
        q = {"variables": {"videoId": self.id}, "query": self.getVideoQuery}
        r = self.callGraphQL(q)
        return self.parse_scene(r)
Code example #24
def match_result(api_scene, range_duration=60, single=False):
    api_title = api_scene.get("title")
    api_duration = int(api_scene.get("length"))
    api_filesize = None
    match_duration = False
    match_size = False
    # Using database
    if database_dict:
        db_duration = int(database_dict["duration"])
        db_height = str(database_dict["height"])
        db_size = int(database_dict["size"])
        if api_scene.get("download_file_sizes"):
            if db_height == "2160":
                api_filesize = api_scene["download_file_sizes"].get("4k")
            else:
                api_filesize = api_scene["download_file_sizes"].get(db_height + "p")
            if api_filesize:
                api_filesize = int(api_filesize)
        if api_filesize is None:
            api_filesize = api_scene.get("index_size")
            if api_filesize:
                api_filesize = int(api_filesize)
        if db_duration - range_duration <= api_duration <= db_duration + range_duration:
            match_duration = True
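        # allow a 1% tolerance either way when comparing file sizes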
        db_size_max = db_size + (db_size / 100)
        db_size_min = db_size - (db_size / 100)
        if api_filesize:
            if db_size_min <= api_filesize <= db_size_max:
                match_size = True
    # Post process things
    match_domain = False
    if url_domain:
        if api_scene.get("sitename"):
            #log.debug("API Sitename: {}".format(api_scene["sitename"]))
            if api_scene["sitename"] == url_domain:
                match_domain = True
        if api_scene.get("network_name"):
            #log.debug("API Network: {}".format(api_scene["network_name"]))
            if api_scene["network_name"] == url_domain:
                match_domain = True

    # Matching ratio
    if SCENE_TITLE:
        match_ratio_title = difflib.SequenceMatcher(
            None, SCENE_TITLE.lower(), api_title.lower()).ratio()
    else:
        match_ratio_title = 0
    if url_title and api_scene.get("url_title"):
        match_ratio_title_url = difflib.SequenceMatcher(
            None, url_title.lower(), api_scene["url_title"].lower()).ratio()
    else:
        match_ratio_title_url = 0

    # Rank search result

    log.debug(
        "[MATCH] Title: {} |-RATIO-| Title: {} / URL: {} |-MATCH-| Duration: {}, Size: {}, Domain: {}"
        .format(
            api_title, round(match_ratio_title, 5), round(match_ratio_title_url, 5),
            match_duration, match_size, match_domain))
    match_dict = {}
    match_dict["title"] = match_ratio_title
    match_dict["url"] = match_ratio_title_url
    information_used = ""
    if (single and (match_duration or (database_dict is None and match_ratio_title_url > 0.5))) or match_ratio_title_url == 1:
        information_used += "A"
    if match_size:
        information_used += "S"
    if match_duration:
        information_used += "D"
    if match_domain:
        information_used += "N"
    if information_used == "":
        information_used = "R"
    match_dict["info"] = information_used
    #debug("[MATCH] {} - {}".format(api_title,match_dict))
    return match_dict
Code example #25
def performerByURL(url, result=None):
    # avoid a shared mutable default argument
    if result is None:
        result = {}
    log.debug("performerByURL: " + url)
    tree = scrapeURL(url)
    result["url"] = url
    result["name"] = next(
        iter(
            tree.xpath(
                "//h3[@id='section001_summary']/following-sibling::p/a[contains(@href,'character')]/text()"
            )), "").strip()
    result["details"] = "\n".join([
        s.strip() for s in tree.xpath(
            "//div[@style='padding: 0 15px 15px 15px; text-align: left;']/text()"
        )
    ])
    if not result["details"]:
        result["details"] = re.sub(
            " .$", ".", " ".join([
                s.strip() for s in tree.xpath(
                    "//h3[@id='section001_summary']/following-sibling::p[contains(a/@href,'character')]//text()"
                ) if s.strip()
            ]))
    result["image"] = next(
        iter(tree.xpath("//meta[@property='og:image']/@content")), "")

    # left table, works for link and plain text fields, return result list
    def parse_left(field):
        template = "//table//th[text()='{0}' or a/text()='{0}']/following-sibling::td/a/text()"
        return tree.xpath(template.format(field))

    result["tags"] = additional_tags
    if include_tag:
        result["tags"] += [{
            "name": tag_prefix + tag.strip()
        } for tag in parse_left("Tags ")]
    if include_parody:
        result["tags"] += [{
            "name": parody_prefix + tag.strip()
        } for tag in parse_left("From")]
    if include_blood_type:
        result["tags"] += [{
            "name": blood_type_prefix + tag.strip()
        } for tag in parse_left("Blood Type")]
    if include_race:
        result["tags"] += [{
            "name": race_prefix + tag.strip()
        } for tag in parse_left("Race")]
    if include_sign:
        result["tags"] += [{
            "name": sign_prefix + tag.strip()
        } for tag in parse_left("Sign")]
    if include_ship_class:
        result["tags"] += [{
            "name": ship_class_prefix + tag.strip()
        } for tag in parse_left("Ship Class")]
    result["country"] = next(iter(parse_left("Nationality")), "")
    birthday = parse_left("Birthday")
    birthyear = parse_left("Birthyear")
    if birthday and birthyear:
        birthdate = datetime.strptime(
            birthday[0].strip(),
            "%B %d").replace(year=int(birthyear[0].strip()))
        result["birthdate"] = birthdate.strftime("%Y-%m-%d")
    bust = parse_left("Bust")
    waist = parse_left("Waist")
    hip = parse_left("Hip")
    if bust and waist and hip:
        bust = bust[0].strip().replace("cm", "")
        waist = waist[0].strip().replace("cm", "")
        hip = hip[0].strip().replace("cm", "")
        result["measurements"] = "{}-{}-{}".format(bust, waist, hip)
    result["height"] = next(iter(parse_left("Height")),
                            "").strip().replace("cm", "")

    # middle/right table, reverse result list to prefer official appearance, return result or empty string
    def parse_right(field):
        template = "//table//th[text()='{}']/following-sibling::td/text()"
        return next(reversed(tree.xpath(template.format(field))),
                    "").strip().replace("Unknown", "")

    # should be tagged anyway if yes
    # if parse_right("Animal Ears") == "Yes":
    #     result["tags"] += [{"name": "performer:animal ears"}]
    hair_length = parse_right("Hair Length")
    if include_hair_length and hair_length:
        result["tags"] += [{"name": hair_length_prefix + hair_length}]
    apparent_age = parse_right("Apparent Age")
    if include_apparent_age and apparent_age:
        result["tags"] += [{"name": apparent_age_prefix + apparent_age}]
    result["gender"] = parse_right("Gender")
    result["eye_color"] = parse_right("Eye Color")
    result["hair_color"] = parse_right("Hair Color")

    return result
Code example #26
def bulk_submit(scene):
    result = {}
    SubmittedTag = "4179"
    NeedsStashID = "4178"
    if SubmittedTag in get_id(scene["tags"]):
        log.info("Scene already submitted")
        return result
    oktosubmit = True
    if scene["stash_ids"] == []:
        # check that every performer has a StashID
        for performer in scene["performers"]:
            if performer["stash_ids"] == []:
                oktosubmit = False
                log.info(performer["name"] + " needs a StashID")
                # tag the performer so they can be fixed later
                tagging = {"id": performer["id"], "tag_ids": [NeedsStashID]}
                query = """
                mutation tagforlater($input: PerformerUpdateInput!) {
                    performerUpdate(input: $input) {
                        name
                    }
                }
                """
                variables = {"input": tagging}
                result = call_graphql(query, variables)
                if result:
                    log.info(result)

        if oktosubmit:
            submission = {"id": scene["id"], "stash_box_index": 0}
            query = """
            mutation BulkSubmitScene($input: StashBoxDraftSubmissionInput!) {
                submitStashBoxSceneDraft(input: $input)
            }
            """
            variables = {"input": submission}
            result = call_graphql(query, variables)
            if result:
                log.info("Scene submitted as draft")
                log.info(result)
                # mark the scene as submitted
                tagging = {"id": scene["id"], "tag_ids": [SubmittedTag]}
                query = """
                mutation tagsubmitted($input: SceneUpdateInput!) {
                    sceneUpdate(input: $input) {
                        title
                    }
                }
                """
                variables = {"input": tagging}
                result = call_graphql(query, variables)
                if result:
                    log.debug(result)

    else:
        log.info("already has a StashID, not resubmitted")
    return result
Code example #27
        scrape['url'] = 'https://{}.com/en/video/{}/{}/{}'.format(
            api_json["sitename"], api_json["sitename"], api_json["url_title"],
            api_json["clip_id"])
    except KeyError:
        if url:
            scrape['url'] = url
    #debug("{}".format(scrape))
    return scrape


SITE = sys.argv[1]

try:
    USERFOLDER_PATH = re.match(r".+\.stash.", __file__).group(0)
    CONFIG_PATH = USERFOLDER_PATH + "config.yml"
    log.debug("Config Path: {}".format(CONFIG_PATH))
except AttributeError:
    USERFOLDER_PATH = None
    CONFIG_PATH = None

HEADERS = {
    "User-Agent":
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:79.0) Gecko/20100101 Firefox/79.0',
    "Origin":
        "https://www.{}.com".format(SITE),
    "Referer":
        "https://www.{}.com".format(SITE)
}

FRAGMENT = json.loads(sys.stdin.read())
SEARCH_TITLE = FRAGMENT.get("name")
Code example #28
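        # carry over aliases from the duplicate tag that are not already present on the target tag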
        if len(tagdata["aliases"]):
            for alias in tagdata["aliases"]:
                #log.debug(alias)
                tagcheck = []
                for tagalias in tagx["aliases"]:
                    tagcheck.append(tagalias.casefold())
                if alias.casefold() not in tagcheck:
                    # we need to check if it's another tag (or alias)
                    #log.debug(alias + " looks new...")
                    if name.casefold() == alias.casefold():
                        # yes, we want this one
                        newaliases.append(alias)
                    else:
                        check = find_tag(alias, tagdata["name"])
                        if check:
                            log.debug(alias + " exists... not added.")
                        else:
                            newaliases.append(alias)
        #log.debug(newaliases)
        if newaliases:
            update_tag_aliases(tagx, newaliases)

# bulk get all studios
# count = 0
# allstudios = get_all_studios()
# for studio in allstudios:
#    #log.debug(studio)
#    studio_id = studio["id"]
#    studio_name = studio["name"]
#    if studio["stash_ids"] != []:
#        studio_stashid = studio["stash_ids"][0]["stash_id"]
Code example #29
    # if the first character is $, filter will be ignored.
    if search_query[0] != "$":
        # make sure longer matches are filtered first
        studios_sorted = sortByLength(studios)
        for x in studios_sorted:
            if x.id.lower() in search_query:
                filter.append(x.id.lower())
                # remove the matched studio name from the search query
                search_query = search_query.replace(x.id.lower(), "")
    else:
        search_query = search_query[1:]

    if filter:
        log.info(f"Filter: {filter} applied")

    log.debug(f"Query: '{search_query}'")

    for x in studios:
        if filter:
            if x.id.lower() not in filter:
                #log.debug(f"[Filter] {x.id} ignored")
                continue
        s = x.getSearchResult(search_query)
        # merge all list into one
        if s:
            lst.extend(s)
    #log.debug(f"{json.dumps(lst)}")
    print(json.dumps(lst))
    sys.exit(0)
Code example #30
                    title
                  }
                }
                """
    variables = {"input": data}
    result = call_graphql(query, variables)
    log.debug("graphql_updateGallery callGraphQL result " + str(result))


FRAGMENT = json.loads(sys.stdin.read())
SCENE_ID = FRAGMENT.get("id")

scene = graphql.getScene(SCENE_ID)
if scene:
    scene_galleries = scene['galleries']
    log.debug("scene_galleries " + str(scene_galleries))
    gallery_ids = []
    if len(scene_galleries) > 0:
        for gallery_obj in scene_galleries:
            gallery_ids.append(gallery_obj['id'])
    elif find_gallery:
        # if no galleries are associated see if any gallery zips exist in directory
        gallery_ids = find_galleries(SCENE_ID, scene["path"])
    log.debug("gallery_ids " + str(gallery_ids))

    for gallery_id in gallery_ids:
        studio = None
        if scene['studio']:
            studio = scene['studio']['id']
        gallery_input = {
            'id': gallery_id,