예제 #1
0
def main():
    """Plugin entry point: run one scrape operation read from stdin.

    sys.argv[1] selects the operation; the JSON fragment on stdin is the
    scraper input.  The result (or null for an unknown mode) is printed
    as JSON on stdout.
    """
    global traxxx

    mode = sys.argv[1]
    traxxx = TraxxxInterface()
    fragment = json.loads(sys.stdin.read())

    log.info(mode)

    # One handler per scraper mode.  Note: 'scene_url' is deliberately
    # served by the same handler as 'scene_query_fragment'.
    handlers = {
        'scene_name': scene_by_name,
        'scene_url': scene_query_fragment,
        'scene_query_fragment': scene_query_fragment,
        'scene_fragment': scene_fragment,
        'performer_lookup': performer_lookup,
        'performer_fragment': performer_fragment,
        'performer_url': performer_url,
    }

    handler = handlers.get(mode)
    data = handler(fragment) if handler else None

    print(json.dumps(data))
예제 #2
0
def find_tag(name, mergename):
    """Count tags whose name OR aliases exactly equal *name*.

    When a match exists, logs a hint to merge it into *mergename* and
    returns the match count; otherwise returns 0.
    """
    # findTags matches either the name or an alias, both by exact value.
    tag_filter = {
        "name": {"value": name, "modifier": "EQUALS"},
        "OR": {"aliases": {"value": name, "modifier": "EQUALS"}},
    }
    query = """
     query searchingforTag($tag_filter : TagFilterType) {
           findTags(tag_filter: $tag_filter){
                count
                tags {
                  id
                  name
             }
           }
       }
    """
    result = call_graphql(query, {"tag_filter": tag_filter})
    count = result["findTags"]["count"]
    if count:
        log.info("You'll need to merge this - Desired main tag " + mergename +
                 " <- desired aliased tag:  " + name)
        return count
    log.debug("No tag found with name: " + name)
    return 0
예제 #3
0
    def get_scene(self, traxxx_scene_id):
        """Fetch a single Traxxx release by its numeric id.

        Returns the first matching release dict, or None when nothing
        matches.  The ...traxScene fragment is defined elsewhere —
        presumably injected by __callGraphQL (TODO confirm).
        """
        query = """
      query Releases(
        $sceneId: Int!
      ) {
        releases(
          filter:{id:{equalTo:$sceneId}}
        ) {
          ...traxScene
        }
      }
    """

        releases = self.__callGraphQL(
            query, {'sceneId': int(traxxx_scene_id)}).get("releases")

        log.info(
            f'scene traxxxID lookup "{traxxx_scene_id}" returned {len(releases)} results'
        )

        return releases[0] if releases else None
예제 #4
0
    def get_performer(self, traxxx_performer_id):
        """Fetch a single Traxxx actor by its numeric id.

        Returns the first matching actor dict, or None when nothing
        matches.  The ...traxActor fragment is defined elsewhere —
        presumably injected by __callGraphQL (TODO confirm).
        """
        query = """
      query Actors(
        $actorId: Int!
      ) {
       actors: actors(
            filter:{id:{equalTo:$actorId}}
          ) {
            ...traxActor
          }
        }
    """

        actors = self.__callGraphQL(
            query, {'actorId': int(traxxx_performer_id)}).get("actors")

        log.info(
            f'performer traxxxID lookup "{traxxx_performer_id}" returned {len(actors)} results'
        )

        return actors[0] if actors else None
예제 #5
0
    def search_scenes(self, search, numResults=20):
        """Full-text search for releases, best rank first.

        Results are capped at *numResults* and filtered to a minimum
        rank of 0.045.  Returns a list of release dicts.
        """
        query = """
      query SearchReleases(
        $query: String!
        $limit: Int = 20
      ) {
        scenes: searchReleases(
          query: $query
          first: $limit
          orderBy: RANK_DESC
          filter: {
            rank: {
              greaterThan: 0.045
            }
          }
        ) {
          release {
            ...traxScene
          }
          rank
        }
      }
    """

        result = self.__callGraphQL(
            query, {'query': search, 'limit': int(numResults)})
        matches = result["scenes"]
        log.info(
            f'scene search "{search}" returned {len(matches)} results'
        )

        return [match["release"] for match in matches]
예제 #6
0
def update_studio(studio, studio_data):
    """Update a Stash studio from scraped *studio_data*.

    Sets the studio image when the scraped data has one and, if the
    scraped name differs, renames the studio while preserving the old
    name as an alias.

    Returns the raw GraphQL result; result["studioUpdate"] is None when
    the update failed (e.g. another studio already uses the new name).
    """
    studioinfo = {"id": studio["id"]}
    try:
        studioinfo["image"] = studio_data["images"][0]["url"]
    # Narrowed from a bare except: only the lookup failures we expect
    # (missing key, empty list, None container) mean "no image".
    except (KeyError, IndexError, TypeError):
        log.debug("No Image on " + studio_data["name"])
    if studio["name"] != studio_data["name"]:
        studioinfo["name"] = studio_data["name"]
        # Keep the previous name reachable as an alias.
        if len(studio["aliases"]):
            studioinfo["aliases"] = studio["aliases"] + [studio["name"]]
        else:
            studioinfo["aliases"] = [studio["name"]]

    query = """
                        mutation studioimageadd($input: StudioUpdateInput!) {
                           studioUpdate(input: $input) {
                             name
                             image_path
                           }
                        }
                       """
    variables = {"input": studioinfo}
    result = call_graphql(query, variables)
    if result:
        if result["studioUpdate"] is None:
            log.info("Failed to update, check for an existing Studio named " +
                     studio_data["name"])
    return result
예제 #7
0
def update_thumbnail(input):
    """Queue regeneration of a scene's thumbnail screenshot.

    *input* is the scene id.  Returns the raw GraphQL result.
    NOTE(review): the parameter shadows the ``input`` builtin; the name
    is kept because callers may pass it by keyword.
    """
    query = """
            mutation SceneGenerateScreenshot($id: ID!) {
              sceneGenerateScreenshot(id: $id)
            }
            """
    result = call_graphql(query, {"id": input})
    if result:
        log.info("queued rebuild of scene thumbnail")
    return result
예제 #8
0
def main():
    """Scrape a scene from the URL supplied in the stdin JSON fragment.

    Resolves the scene page, extracts its embedded JSON, and prints the
    scraped scene as JSON on stdout.  Exits with status 1 on any
    unrecoverable error.
    """
    stdin = sys.stdin.read()
    log.debug(stdin)
    fragment = json.loads(stdin)

    # .get() so a fragment without a 'url' key exits cleanly with the
    # intended message instead of raising KeyError.
    if not fragment.get('url'):
        log.error('No URL entered.')
        sys.exit(1)
    url = fragment['url'].strip()
    site, studio, sid, slug = get_from_url(url)
    if site is None:
        log.error('The URL could not be parsed')
        sys.exit(1)
    response, err = make_request(url, f"https://{site}")
    if err is not None:
        log.error('Could not fetch page HTML', err)
        sys.exit(1)
    j = fetch_page_json(response)
    if j is None:
        log.error('Could not find JSON on page')
        sys.exit(1)
    if 'video' not in j['data']:
        log.error('Could not locate scene within JSON')
        sys.exit(1)

    scene = j["data"]["video"]

    # Sanity-check the page's scene id against the id from the URL.
    if scene.get('id'):
        if str(scene['id']) != sid:
            log.error('Wrong scene within JSON')
            sys.exit(1)
        log.info(f"Scene {sid} found")
    scrape = {}
    if scene.get('title'):
        scrape['title'] = scene['title']
    if scene.get('release_date'):
        # Keep only the YYYY-MM-DD prefix of the timestamp.
        scrape['date'] = scene['release_date'][:10]
    if scene.get('description'):
        # Strip HTML markup from the description text.
        details = BeautifulSoup(scene['description'], "html.parser").get_text()
        scrape['details'] = details
    if scene.get('sites'):
        scene_studio = scene['sites'][0]['name']
        scrape['studio'] = {'name': scene_studio}
    if scene.get('models'):
        # A single entry may pack several performers as "A & B".
        models = []
        for m in scene['models']:
            models.extend([x.strip() for x in m['name'].split("&")])
        scrape['performers'] = [{'name': x} for x in models]
    if scene.get('tags'):
        scrape['tags'] = [{'name': x['name']} for x in scene['tags']]
    if j['data'].get('file_poster'):
        scrape['image'] = j['data']['file_poster']
    print(json.dumps(scrape))
예제 #9
0
def apikey_get(site_url, time):
    """Fetch fresh Algolia credentials from *site_url* and cache them.

    Returns (application_id, api_key), or (None, None) when the page
    could not be fetched or contained no key data.
    """
    page = sendRequest(site_url, HEADERS)
    if page is None:
        return None, None
    script_json = fetch_page_json(page.text)
    if script_json is None:
        log.error("Can't retrieve API keys from page ({})".format(site_url))
        return None, None
    algolia = script_json['api']['algolia']
    application_id = algolia['applicationID']
    api_key = algolia['apiKey']
    # Persist the keys so later runs can reuse them instead of re-scraping.
    write_config(time, application_id, api_key)
    log.info("New API keys: {}".format(api_key))
    return application_id, api_key
예제 #10
0
def get_tag_from_stashbox(name):
    """Look up a StashDB tag by name.

    Returns the {name, aliases} dict from findTag, or None when the
    call produced no result.
    """
    query = """
       query getTag($name : String!) {
           findTag(name: $name){
                name
                aliases
           }
       }
    """
    result = stashbox_call_graphql(query, {"name": name})
    if not result:
        log.info("No StashDB tag found with name: " + name)
        return None
    return result["findTag"]
def performerByName(query):
    """Search AnimeCharactersDatabase characters by name.

    Returns a list of {name, id, url} dicts, one per search hit.
    """
    encoded = requests.utils.quote(query)
    url = f"https://www.animecharactersdatabase.com/searchall.php?in=characters&sq={encoded}"
    tree = scrapeURL(url)
    names = tree.xpath("//li/div[@class='tile3top']/a/text()")
    hrefs = tree.xpath("//li/div[@class='tile3top']/a/@href")

    # Each href looks like "characters.php?id=<n>"; strip the prefix to
    # recover the bare id.
    results = [
        {
            "name": char_name,
            "id": href.replace("characters.php?id=", ""),
            "url": "https://www.animecharactersdatabase.com/" + href,
        }
        for char_name, href in zip(names, hrefs)
    ]
    log.info(f"scraped {len(results)} results on: {url}")
    return results
예제 #12
0
def merge_tag(sourceid, destid):
    """Merge tag *sourceid* into tag *destid* via the tagsMerge mutation."""
    log.info("Merging: " + sourceid + "->" + destid)
    query = """
     mutation merging($input: TagsMergeInput!) {
        tagsMerge(input: $input) {
           name
         }
     }
     """
    # tagsMerge takes a list of sources and a single destination.
    variables = {"input": {"source": [sourceid], "destination": destid}}
    results = call_graphql(query, variables)
    log.info(results)
예제 #13
0
def update_gallery(input):
    """Apply a GalleryUpdateInput dict and log the updated gallery.

    Returns the raw GraphQL result.  NOTE(review): the parameter
    shadows the ``input`` builtin; kept for caller compatibility.
    """
    log.debug("gallery input " + str(input))
    query = """
                mutation GalleryUpdate($input : GalleryUpdateInput!) {
                  galleryUpdate(input: $input) {
                    id
                    title
                  }
                }
                """
    result = call_graphql(query, {"input": input})
    if result:
        updated = result['galleryUpdate']
        g_id = updated.get('id')
        g_title = updated.get('title')
        log.info(f"updated Gallery ({g_id}): {g_title}")
    return result
예제 #14
0
def check_config(domain, time):
    """Return cached (app_id, api_key) for *domain* when still fresh.

    A cached key is reused when it was written during the same clock
    hour as *time* and less than a day earlier.  Returns (None, None)
    when no usable cache entry exists.
    """
    if not os.path.isfile(STOCKAGE_FILE_APIKEY):
        return None, None
    config = ConfigParser()
    config.read(STOCKAGE_FILE_APIKEY)
    try:
        time_past = datetime.datetime.strptime(
            config.get(domain, 'date'), '%Y-%m-%d %H:%M:%S.%f')

        # For integer hours this requires the same hour value.
        same_hour = time_past.hour - 1 < time.hour < time_past.hour + 1
        same_day = (time - time_past).days == 0
        if same_hour and same_day:
            log.debug("Using old key")
            return config.get(domain, 'app_id'), config.get(domain, 'api_key')
        log.info(
            "Need new api key: [{}|{}|{}]".format(
                time.hour, time_past.hour, (time - time_past).days))
    except NoSectionError:
        # No cached entry for this domain.
        pass
    return None, None
예제 #15
0
def update_tag_aliases(tag, aliases):
    """Append *aliases* to *tag*'s alias list via tagUpdate.

    Returns the raw GraphQL result; result["tagUpdate"] is None when
    the update failed (typically an alias collides with another tag).
    """
    taginput = {
        "id": tag["id"],
        "name": tag["name"],
        "aliases": tag["aliases"] + aliases,
    }
    query = """
                        mutation tagaliasadd($input: TagUpdateInput!) {
                           tagUpdate(input: $input) {
                             name
                             aliases
                           }
                        }
                       """
    result = call_graphql(query, {"input": taginput})
    if result["tagUpdate"] is None:
        log.info("Failed to update, check for existing tags named:")
        log.info(aliases)
    return result
예제 #16
0
    def search_performers(self, search, numResults=20):
        """Full-text search for actors, capped at *numResults*.

        Returns a list of actor dicts.  The ...traxActor fragment is
        defined elsewhere — presumably injected by __callGraphQL
        (TODO confirm).
        """
        query = """
      query SearchActors(
        $query: String!
        $limit: Int = 20
      ) {
        actors: searchActors(
          query: $query
          first: $limit
        ) {
          ...traxActor
        }
      }
    """

        variables = {'query': search, 'limit': int(numResults)}

        results = self.__callGraphQL(query, variables).get("actors")
        log.info(
            f'performer search "{search}" returned {len(results)} results')
        # list() instead of the manual [p for p in results] copy.
        return list(results)
  def get_scene_by_shootID(self, shootId):
    """Look up a single release by its shoot id.

    Returns the first matching release dict, or None when nothing
    matches.
    """
    query = """
      query Releases(
        $idList: [String!]
      ){ releases( filter: {shootId: { in:$idList } }
        ){
          ...traxScene
        }
      }
    """

    releases = self.__callGraphQL(query, {'idList': [shootId]}).get("releases")

    log.info(f'scene shootID lookup "{shootId}" returned {len(releases)} results')

    return releases[0] if releases else None
예제 #18
0
def tagclean():
    """Merge tags whose cleaned names collide with another tag.

    Fetches every tag, normalizes each name (commas stripped, trimmed),
    and merges any candidate whose title-cased clean name matches
    another tag's name or one of its aliases (title-case comparison).
    """
    log.info("Cleaning tags...")
    query = """
          query findmytags {
            allTags {
              name,
              id,
              aliases
            }
          }
            """
    alltags = call_graphql(query, {})["allTags"]

    for item in alltags:
        name = item["name"]
        tag_id = item["id"]
        # Strip commas and surrounding whitespace.
        cleanname = name.replace(',', "").strip()
        # Only names changed by cleaning (or not in title case) are
        # considered merge candidates.
        if name.title() == cleanname:
            continue
        for candidate in alltags:
            candidate_id = candidate["id"]
            if candidate_id == tag_id:
                # Never merge a tag into itself.
                continue
            if candidate["name"].title() == cleanname.title():
                log.info("fixing: @" + name + "@ ->" +
                         candidate["name"])
                merge_tag(tag_id, candidate_id)
            else:
                for alias in candidate["aliases"]:
                    if alias.title() == cleanname.title():
                        log.info("fixing: @" + name + "@ ->" +
                                 candidate["name"])
                        merge_tag(tag_id, candidate_id)
    # should be tagged anyway if yes
    # if parse_right("Animal Ears") == "Yes":
    #     result["tags"] += [{"name": "performer:animal ears"}]
    hair_length = parse_right("Hair Length")
    if include_hair_length and hair_length:
        result["tags"] += [{"name": hair_length_prefix + hair_length}]
    apparent_age = parse_right("Apparent Age")
    if include_apparent_age and apparent_age:
        result["tags"] += [{"name": apparent_age_prefix + apparent_age}]
    result["gender"] = parse_right("Gender")
    result["eye_color"] = parse_right("Eye Color")
    result["hair_color"] = parse_right("Hair Color")

    return result


# Read the scraper input fragment from stdin.
i = readJSONInput()

# sys.argv[1] selects the scraper operation; output is JSON on stdout.
if sys.argv[1] == "performerByURL":
    url = i["url"]
    result = performerByURL(url)
    print(json.dumps(result))
elif sys.argv[1] == "performerByName":
    name = i["name"]
    log.info(f"Searching for name: {name}")
    # Cap the hits at `limit`, then enrich them with franchise info.
    results = addFranchise(name, performerByName(name)[:limit])
    print(json.dumps(results))
예제 #20
0
        id = item["id"]
        # strip commas and spaces
        cleanname = name.replace(',', "").strip()
        # if it's been cleaned OR it's not in title case (ie is lower case or upper), we'll assume it's potentially merge-able
        if name.title() != cleanname:
            for correctitem in alltags:
                correctid = correctitem["id"]
                if correctid == id:
                    #same item - don't match
                    continue
                else:
                    if correctitem["name"].title() == cleanname.title():
                        log.info("fixing: @" + name + "@ ->" +
                                 correctitem["name"])
                        merge_tag(id, correctid)
                    else:
                        for correctalias in correctitem["aliases"]:
                            if correctalias.title() == cleanname.title():
                                log.info("fixing: @" + name + "@ ->" +
                                         correctitem["name"])
                                merge_tag(id, correctid)
    return


# Plugin protocol: stdin carries a JSON fragment (unused by tagclean,
# but read so the module-level FRAGMENT stays available).
FRAGMENT = json.loads(sys.stdin.read())

tagclean()
log.info("finished running tag clean")

# Emit an empty JSON object so the caller receives valid output.
print(json.dumps({}))

# Last Updated April 05, 2022
예제 #21
0
def bulk_submit(scene):
    """Submit *scene* to StashBox as a draft when it has no stash id yet.

    Scenes already tagged as submitted are skipped.  Performers without
    a stash id block submission and are tagged for follow-up instead.
    Returns the last GraphQL result ({} when nothing was attempted).
    """
    result = {}
    # Hard-coded tag ids for this installation.
    SubmittedTag = "4179"   # marks scenes already submitted as drafts
    NeedsStashID = "4178"   # marks performers still missing a stash id
    if SubmittedTag in get_id(scene["tags"]):
        log.info("Scene already submitted")
        return result
    oktosubmit = True
    if scene["stash_ids"] == []:
        # Every performer must already have a stash id before we submit.
        for performer in scene["performers"]:
            if performer["stash_ids"] == []:
                oktosubmit = False
                log.info(performer["name"] + " needs a StashID")
                # Tag the performer so they can be fixed later.
                tagging = {"id": performer["id"], "tag_ids": [NeedsStashID]}
                query = """
            mutation tagforlater($input: PerformerUpdateInput!) {
                  performerUpdate(input: $input) { 
                   name 
                  }
            }
            """
                variables = {"input": tagging}
                result = call_graphql(query, variables)
                if result:
                    log.info(result)

        if oktosubmit:
            submission = {"id": scene["id"], "stash_box_index": 0}
            query = """
            mutation BulkSubmitScene($input: StashBoxDraftSubmissionInput!) {
              submitStashBoxSceneDraft(input: $input)
            }
            """
            variables = {"input": submission}
            # (Removed a dead `result = []` that was immediately
            # overwritten by the call below.)
            result = call_graphql(query, variables)
            if result:
                log.info("Scene submitted as draft")
                log.info(result)
                # Mark the scene so it is skipped on the next run.
                tagging = {"id": scene["id"], "tag_ids": [SubmittedTag]}
                query = """
            mutation tagsubmitted($input: SceneUpdateInput!) {
                  sceneUpdate(input: $input) {
                   title
                  }
            }
            """
                variables = {"input": tagging}
                result = call_graphql(query, variables)
                if result:
                    log.debug(result)

    else:
        log.info("already has Stash id, not resubmitted")
    return result
예제 #22
0
    # if the first character is $, filter will be ignored.
    if search_query[0] != "$":
        # make sure longer matches are filtered first
        studios_sorted = sortByLength(studios)
        for x in studios_sorted:
            if x.id.lower() in search_query:
                filter.append(x.id.lower())
                continue
            # remove the studio from the search result
            search_query = search_query.replace(x.id.lower(), "")
    else:
        search_query = search_query[1:]

    if filter:
        log.info(f"Filter: {filter} applied")

    log.debug(f"Query: '{search_query}'")

    for x in studios:
        if filter:
            if x.id.lower() not in filter:
                #log.debug(f"[Filter] {x.id} ignored")
                continue
        s = x.getSearchResult(search_query)
        # merge all list into one
        if s:
            lst.extend(s)
    #log.debug(f"{json.dumps(lst)}")
    print(json.dumps(lst))
    sys.exit(0)
예제 #23
0
def json_parser(search_json, range_duration=60, single=False):
    """Pick the best-matching scene out of a search result list.

    Each candidate is scored by match_result() into an info code
    (A = ByID/Most likely | S = Size | D = Duration | N = Network |
    R = Only Ratio) plus title/url similarity ratios.  The strongest
    candidate per code is kept, then codes are tried from strongest to
    weakest; weak-evidence codes additionally require a minimum ratio.

    Returns the winning scene's JSON, or None when nothing qualifies.
    """
    result_dict = {}
    # Parallel dict holding only printable fields, for logging.
    debug_dict = {}
    # Dump the raw search result for offline debugging.
    with open("adultime_scene_search.json", 'w', encoding='utf-8') as f:
        json.dump(search_json, f, ensure_ascii=False, indent=4)

    def _record(info, r_match, scene):
        # Store this candidate as the current best for its info code.
        result_dict[info] = {
            "title": r_match["title"],
            "url": r_match["url"],
            "json": scene
        }
        debug_dict[info] = {
            "title": r_match["title"],
            "url": r_match["url"],
            "scene": scene["title"]
        }

    for scene in search_json:
        r_match = match_result(scene, range_duration, single)
        info = r_match["info"]
        if not info:
            continue
        current = result_dict.get(info)
        if current is None:
            _record(info, r_match, scene)
        # Url should be more accurate than the title
        elif r_match["url"] > current["url"]:
            _record(info, r_match, scene)
        elif (r_match["title"] > current["title"]
              and r_match["title"] > current["url"]):
            _record(info, r_match, scene)

    log.info("--- BEST RESULT ---")
    for key, item in debug_dict.items():
        log.info(
            "[{}] Title: {}; Ratio Title: {} - URL: {}".format(
                key, item["scene"], round(item["title"], 3), round(item["url"], 3)))
    log.info("--------------")

    # Codes in priority order.  A non-None threshold means the entry's
    # title OR url ratio must exceed it — and, matching the original
    # elif chain, a present-but-weak first hit returns None outright
    # rather than falling through to weaker codes.
    priority = [
        ("ASDN", None), ("ASD", None), ("ASN", None), ("ADN", None),
        ("AS", None), ("AD", None), ("AN", 0.5), ("A", 0.7),
        ("SDN", None), ("SD", None), ("SN", 0.5), ("DN", 0.5),
        ("S", 0.7), ("D", 0.7),
        ("N", 0.7), ("R", 0.8),
    ]
    for key, threshold in priority:
        entry = result_dict.get(key)
        if entry:
            if (threshold is None or entry["title"] > threshold
                    or entry["url"] > threshold):
                return entry["json"]
            return None
    return None
예제 #24
0
            database_dict = database_dict["file"]
        log.debug("[DATABASE] Info: {}".format(database_dict))
    else:
        database_dict = None
        log.debug("URL scraping... Ignoring database...")
else:
    database_dict = None
    log.warning("Database path missing.")

# Extract things
url_title = None
url_id = None
url_domain = None
if SCENE_URL:
    url_domain = re.sub(r"www\.|\.com", "", urlparse(SCENE_URL).netloc)
    log.info("URL Domain: {}".format(url_domain))
    url_id_check = re.sub('.+/', '', SCENE_URL)
    # Gettings ID
    try:
        if url_id_check.isdigit():
            url_id = url_id_check
        else:
            url_id = re.search(r"/(\d+)/*", SCENE_URL).group(1)
        log.info("ID: {}".format(url_id))
    except:
        log.warning("Can't get ID from URL")
    # Gettings url_title
    try:
        url_title = re.match(r".+/(.+)/\d+", SCENE_URL).group(1)
        log.info("URL_TITLE: {}".format(url_title))
    except: