Example #1
def search_anime_in_erina_database(image_hash):
    # 98.4375 = (1 - 1/64) * 100: above this threshold, only an exact
    # 64-bit hash match can satisfy the similarity requirement.
    if SearchConfig.thresholds.erina_similarity > 98.4375:
        for anime in os.listdir(erina_db_path):
            if anime in ['.DS_Store', '.gitkeep']:
                continue
            for folder in os.listdir(erina_db_path + anime):
                if folder == '.DS_Store':
                    continue
                if os.path.isfile(erina_db_path + anime + '/' + folder + '/' +
                                  str(image_hash) + '.erina'):
                    StatsAppend(DatabaseStats.erinaDatabaseLookups, 1)
                    return parser.ErinaFile(
                        "erina_database", anime + '/' + folder + '/' +
                        str(image_hash) + '.erina'
                    ).content, 100, anime + '/' + folder + '/' + str(
                        image_hash) + '.erina'
    else:
        distance_dict = {}
        iteration = 0
        for anime in os.listdir(erina_db_path):
            if anime in ['.DS_Store', '.gitkeep']:
                continue
            for folder in os.listdir(erina_db_path + anime):
                if folder == '.DS_Store':
                    continue
                for file in os.listdir(erina_db_path + anime + '/' + folder):
                    if file == '.DS_Store':
                        continue
                    iteration += 1
                    distance = hamming_distance(
                        file.replace('.erina', ''), str(image_hash))
                    if distance <= 1:
                        # At most one differing bit: return the match right away.
                        return parser.ErinaFile(
                            "erina_database",
                            anime + '/' + folder + '/' + file
                        ).content, (1 - (distance / 64)) * 100, anime + '/' + folder + '/' + file
                    else:
                        distance_dict[anime + '/' + folder + '/' + file] = distance
        StatsAppend(DatabaseStats.erinaDatabaseLookups, iteration)
        # Convert the percentage threshold back into a maximum Hamming
        # distance (the hashes are 64 bits long), then return the closest
        # match within that bound.
        threshold = int((SearchConfig.thresholds.erina_similarity * 64) / 100)
        for distance in range(2, 64 - threshold):
            for element in distance_dict:
                if distance_dict[element] == distance:
                    return parser.ErinaFile(
                        "erina_database", element
                    ).content, (1 - (distance / 64)) * 100, element
    return None, None, None
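The search above leans on a hamming_distance helper that is not shown in this listing. A minimal sketch, assuming the hashes are hexadecimal strings encoding 64-bit perceptual hashes (which the 0-64 distance range and the (1 - 1/64) * 100 = 98.4375 threshold suggest):

# Minimal sketch of the assumed hamming_distance helper: counts the bits
# that differ between two hex-encoded 64-bit hashes.
def hamming_distance(hash1, hash2):
    return bin(int(hash1, 16) ^ int(hash2, 16)).count("1")

# These two hashes differ in exactly one bit, which the search maps to
# (1 - 1/64) * 100 = 98.4375% similarity.
assert hamming_distance("ffffffffffffffff", "fffffffffffffffe") == 1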
Example #2
def check(user_id):
    '''
    Add an entry to check.
    '''
    global current_images_dict
    current_images_dict[user_id] = time.time()
    StatsAppend(LineStats.storedImages, len(filecenter.files_in_dir(images_path)))
Example #3
def anilist_api_search(query_string):
    """
    Internal Function for the cache API of Erina to request a search from the AniList API (GraphQL)
    """
    # query is the response structure requested to the GraphQL AniList API (→ See GraphQL and AniList API documentation to learn more about this data structure)
    query = '''query ($search: String) {
    anime: Page(perPage: 1) {
        results: media(type: ANIME, search: $search)''' + anilistApiQuery + '''
    }
}
    '''
    variables = {'search': query_string}
    response = requests.post(url='https://graphql.anilist.co',
                             json={
                                 'query': query,
                                 'variables': variables
                             })
    StatsAppend(ExternalStats.anilistAPICalls, f"Query: {str(query_string)}")
    if response.status_code == 200:
        anilistResponse = json.loads(response.text.replace(
            'null', '""'))['data']['anime']['results']
        if len(anilistResponse) > 0:
            return anilistResponse[0]
        else:
            return {"errors": [{"message": "Not found.", "status": 404}]}
    else:
        return json.loads(response.text)
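A hypothetical call against this helper (the "title" field is a standard AniList media field, assuming anilistApiQuery requests it; the query string is made up):

# Hypothetical usage of anilist_api_search()
result = anilist_api_search("Cowboy Bebop")
if "errors" in result:
    print(result["errors"][0]["message"])  # e.g. "Not found."
else:
    print(result["title"])  # e.g. {'romaji': 'Cowboy Bebop', ...}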
Example #4
    def __init__(self, data) -> None:
        StatsAppend(erina.erinaParsingCount, "Erina")
        # Normalize to get the same type of data every time
        if isinstance(data, list):
            data = "\n".join(data)
        else:
            data = str(data)

        self.data = data.split("\n")

        self.path = None
        self.anilist_id = None
        self.hash = None
        self.similarity = None
        self.cache_timestamp = None

        for element in self.data:
            element = str(element).replace("\n", "")
            if element[:5] == 'Path:':
                self.path = str(element[6:])
            elif element[:11] == 'AniList ID:':
                self.anilist_id = utils.convert_to_int(element[12:])
            elif element[:5] == 'Hash:':
                self.hash = str(element[6:])
            elif element[:11] == 'Similarity:':
                self.similarity = utils.convert_to_float(element[12:])
            elif element[:16] == 'Cache Timestamp:':
                self.cache_timestamp = self.CacheTimestamp(element[17:])
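The line-based .erina format this parser consumes can be read off the prefixes it matches. A hypothetical entry, reconstructed from those prefixes (real files may carry more fields):

# Hypothetical input for the parser above
sample_entry = [
    "Path: some_anime/season_1/ffffffffffffffff.erina",
    "AniList ID: 1",
    "Hash: ffffffffffffffff",
    "Similarity: 100.0",
    "Cache Timestamp: 1577836800.0",
]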
Example #5
    def __init__(self, type, message) -> None:
        self.type = str(type)
        self.message = str(message)
        self.timestamp = time()
        self.datetime = datetime.fromtimestamp(self.timestamp)
        self.formatted_timestamp = f"{self.datetime.year}-{self.datetime.month}-{self.datetime.day} at {self.datetime.hour}:{self.datetime.minute}:{self.datetime.second}"
        log("ErinaLine", self.type + ": " + self.message, error=True)
        StatsAppend(erina.errorsCount, "ErinaLine")
Example #6
def searchAnime(query):
    """
    Searches an anime by its title

    Erina Project — 2020\n
    © Anime no Sekai
    """
    log("ErinaSearch", "Searching for " + str(query) + "... (title search)")
    StatsAppend(SearchStats.titleSearchCount, query)
    return title_search.searchAnime(query)
Example #7
    def __init__(self, data) -> None:
        StatsAppend(erina.erinaParsingCount, "SauceNAO")
        # Normalize to get the same type of data every time
        if isinstance(data, list):
            data = "\n".join(data)
        else:
            data = str(data)

        self.data = data.split("\n")

        self.similarity = None
        self.database = None
        self.title = None
        self.link = None
        self.author = None
        self.thumbnail = None
        self.is_manga = None
        self.is_anime = None
        self.part = None
        self.year = None
        self.timing = None
        self.cache_timestamp = None

        for element in self.data:
            element = str(element).replace("\n", "")

            if element[:11] == 'Similarity:':
                self.similarity = utils.convert_to_float(element[12:])
            elif element[:9] == 'Index ID:':
                self.database = self.Index(element[10:])
            elif element[:6] == 'Title:':
                self.title = self.AnimeTitle(native_title=element[7:])
            elif element[:4] == 'URL:':
                self.link = str(element[5:])
            elif element[:7] == 'Author:':
                self.author = str(element[8:])
            elif element[:10] == 'Thumbnail:':
                self.thumbnail = str(element[11:])
            elif element[:8] == 'isManga:':
                self.is_manga = utils.convert_to_boolean(element[9:])
            elif element[:5] == 'Part:':
                self.part = utils.convert_to_int(element[6:])
            elif element[:8] == 'isAnime:':
                self.is_anime = utils.convert_to_boolean(element[9:])
            elif element[:8] == 'Episode:':  #### COMPATIBILITY WITH OLD SAUCENAO CACHE
                self.part = utils.convert_to_int(element[9:])
            elif element[:5] == 'Year:':
                self.year = utils.convert_to_int(element[6:])
            elif element[:15] == 'Estimated Time:':
                self.timing = self.Timing(from_time=element[16:],
                                          to=element[16:],
                                          at=element[16:])
            elif element[:16] == 'Cache Timestamp:':
                self.cache_timestamp = self.CacheTimestamp(element[17:])
Example #8
def anilistIDSearch(anilistID):
    """
    Searches an anime from AniList Caches or AniList API
    
    Erina Project — 2020\n
    © Anime no Sekai
    """
    log("ErinaSearch",
        "Searching for " + str(anilistID) + "... (anilist id search)")
    StatsAppend(SearchStats.anilistIDSearchCount, anilistID)
    return anilist_id_search.search_anime_by_anilist_id(anilistID)
Example #9
def checkForUpdate():
    """
    Checks infinitely for any update
    """
    while True:
        verify_manami_adb()
        numberOfCaches = 0
        for path in caches_path:
            numberOfCaches += len(files_in_dir(path))
        StatsAppend(erina.cacheFilesCount, numberOfCaches)
        sleep(86400) # Checks every day
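Since checkForUpdate() blocks forever, it has to run off the main thread. A minimal sketch using the standard threading module (an assumption; the project's actual startup code is not shown here):

import threading

# Daemon thread: the checker dies with the main process instead of keeping it alive
updateChecker = threading.Thread(target=checkForUpdate, daemon=True)
updateChecker.start()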
Example #10
def imageSearch(image):
    """
    Searches an anime from an image (anime scene for example)

    image: Can be a URL, a path, a base64 encoded string or a PIL.Image.Image instance

    Erina Project — 2020\n
    © Anime no Sekai
    """
    log("ErinaSearch", "Searching for an image...")
    StatsAppend(SearchStats.imageSearchCount, None)
    return hash_search.search_anime_by_hash(erinahash.hash_image(image))
Example #11
def base64_from_image(image_path):
    """
    Converts an image to base64
    
    Erina Project — 2020\n
    © Anime no Sekai
    """
    log("ErinaHash", "Converting " + str(image_path) + " to base64...")
    image_content = BinaryFile(image_path).read()
    result = base64.b64encode(image_content).decode("utf-8")
    StatsAppend(ErinaHashStats.createdBase64String,
                f"New Base64 String (length: {str(len(result))})")
    return result
Example #12
def checkImages():
    '''
    Timeout checking function.
    '''
    global current_images_dict
    number_of_deleted_files = 0 # logging purposes
    for entry in list(current_images_dict):  # copy the keys: entries may be popped during iteration
        if time.time() - current_images_dict[entry] > LineConfig.images_timeout:
            if filecenter.delete(images_path + entry + '.erina_image') == 0:
                current_images_dict.pop(entry, None)
                number_of_deleted_files += 1 # logging purposes
    ### LOGGING
    if number_of_deleted_files > 0:
        if number_of_deleted_files == 1:
            log("ErinaLine", "[Image Checker] Deleted 1 entry")
        else:
            log("ErinaLine", f'[Image Checker] Deleted {str(number_of_deleted_files)} entries')
        StatsAppend(LineStats.storedImages, len(filecenter.files_in_dir(images_path)))
Example #13
def search(query):
    """
    Finds the most similar title
    """
    results_dict = {}
    InputVector = StringVector(query)
    for vector in Database.vectors:
        summation = sum(
            vector.count[character] * InputVector.count[character]
            for character in vector.set.intersection(InputVector.set))
        length = vector.length * InputVector.length
        similarity = (0 if length == 0 else summation / length)
        results_dict[vector] = similarity
    StatsAppend(DatabaseStats.manamiDBTitleVectorLookups,
                len(Database.vectors))
    bestResult = max(results_dict.items(),
                     key=operator.itemgetter(1))[0]  # The vector with the highest similarity
    return Database.vectors[bestResult], results_dict[bestResult]
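search() computes a cosine similarity over per-character count vectors: the dot product over the shared character set, divided by the product of the vectors' Euclidean norms. A minimal sketch of the StringVector type it assumes (the project's real class may differ):

import math
from collections import Counter

class StringVector:
    def __init__(self, text):
        self.text = str(text).lower()
        self.count = Counter(self.text)  # character -> number of occurrences
        self.set = set(self.count)       # distinct characters
        # Euclidean norm of the count vector, the denominator in search()
        self.length = math.sqrt(sum(v * v for v in self.count.values()))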
Example #14
    def __init__(self, data) -> None:
        StatsAppend(erina.erinaParsingCount, "IQDB")
        # Normalize to get the same type of data every time
        if isinstance(data, list):
            data = "\n".join(data)
        else:
            data = str(data)

        self.data = data.split("\n")

        self.tags = None
        self.link = None
        self.title = None
        self.size = None
        self.hentai = None
        self.similarity = None
        self.database = None

        self.author = None
        self.cache_timestamp = None

        for element in self.data:
            element = str(element).replace("\n", "")

            if element[:10] == 'IQDB Tags:':
                self.tags = [
                    utils.capitalize_string(tag)
                    for tag in str(element[11:]).split(':::')
                ]
            elif element[:4] == 'URL:':
                self.link = str(element[5:])
            elif element[:6] == 'Title:':
                self.title = str(element[7:])
            elif element[:5] == 'Size:':
                self.size = self.Size(element[6:])
            elif element[:7] == 'isSafe:':
                # Note: the cache stores an isSafe flag, so this attribute is
                # True for safe images despite its name
                self.hentai = utils.convert_to_boolean(element[8:])
            elif element[:11] == 'Similarity:':
                self.similarity = utils.convert_to_float(element[12:])
            elif element[:9] == 'Database:':
                self.database = str(element[10:])
            elif element[:16] == 'Cache Timestamp:':
                self.cache_timestamp = self.CacheTimestamp(element[17:])
Example #15
def anilist_api(anilist_id):
    """
    Internal Function for the cache API of Erina to request from the AniList API (GraphQL)
    """
    # query is the response structure requested to the GraphQL AniList API (→ See GraphQL and AniList API documentation to learn more about this data structure)
    query = '''
    query ($id: Int) {
        Media(id: $id, type: ANIME)''' + anilistApiQuery + '''
    }
    '''
    variables = {'id': anilist_id}
    response = requests.post(url='https://graphql.anilist.co',
                             json={
                                 'query': query,
                                 'variables': variables
                             })
    StatsAppend(ExternalStats.anilistAPICalls, f"ID: {str(anilist_id)}")
    if response.status_code == 200:
        return json.loads(response.text.replace('null', '""'))['data']['Media']
    else:
        return json.loads(response.text)
Example #16
def tracemoe_caching(image_hash):
    '''
    Caches the given Trace.moe API response\n
    Project Erina
    © Anime no Sekai - 2020
    '''
    try:
        log("ErinaCaches", f'Caching {str(image_hash)} trace.moe data...')
        try:
            if image_hash.has_url:
                if str(config.Caches.keys.tracemoe).replace(" ", "") in ["None", ""]:
                    requestResponse = json.loads(requests.get('https://trace.moe/api/search?url=' + image_hash.url).text)
                else:
                    requestResponse = json.loads(requests.get('https://trace.moe/api/search?url=' + image_hash.url + '&token=' + str(config.Caches.keys.tracemoe)).text)
            else:
                if str(config.Caches.keys.tracemoe).replace(" ", "") in ["None", ""]:
                    requestResponse = json.loads(requests.post('https://trace.moe/api/search', json={'image': image_hash.base64}).text)
                else:
                    requestResponse = json.loads(requests.post('https://trace.moe/api/search?token=' + str(config.Caches.keys.tracemoe), json={'image': image_hash.base64}).text)
        except:
            return CachingError("TRACEMOE_API_RESPONSE", "An error occurred while retrieving information from the trace.moe API")
        
        StatsAppend(ExternalStats.tracemoeAPICalls)

        try:
            cache = tracemoe.erina_from_json(requestResponse)
        except:
            print(exc_info()[0])
            print(exc_info()[1])
            print(traceback.print_exception(*exc_info()))
            return CachingError("ERINA_CONVERSION", f"An error occured while converting trace.moe API Data to a caching format ({str(image_hash)})")
        try:
            TextFile(tracemoe_cache_path + str(image_hash) + '.erina', blocking=False).write(cache)
        except:
            return CachingError("FILE_WRITE", f"An error occured while writing out the cache data to a file ({str(image_hash)})")
        return tracemoe_parser.TraceMOECache(cache)
    except:
        return CachingError("UNKNOWN_ERROR", f"An unknown error occured while caching trace.moe API Data ({str(image_hash)})")
Example #17
    def __init__(self, hashobj, ImageObj, URL=None) -> None:
        self.ImageHash = hashobj
        self.Image = ImageObj
        self.ImageIO = BytesIO()
        self.Image = self.Image.convert('RGB')  # convert() returns a new image; keep it
        try:
            self.Image.save(self.ImageIO, format="PNG")
        except:
            try:
                self.Image.save(self.ImageIO, format="JPEG")
            except:
                pass
        self.hash = str(self.ImageHash)
        self.base64 = str(
            base64.b64encode(self.ImageIO.getvalue()).decode("utf-8"))
        if URL is not None:
            self.has_url = True
            self.url = str(URL)
        else:
            self.has_url = False
            self.url = None

        StatsAppend(ErinaHashStats.createdHashes, self.hash)
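Hypothetical construction of this wrapper (its class name is not shown in this listing, so "HashedImage" is assumed), using Pillow and an imagehash-style 64-bit perceptual hash in place of the project's erinahash module:

from PIL import Image
import imagehash

image = Image.open("frame.png")
hashed = HashedImage(imagehash.average_hash(image), image, URL=None)
print(hashed.hash, hashed.has_url)  # hex digest of the hash, and False (no URL given)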
Example #18
def on_direct_message(message):
    """
    DM Receiving
    """
    directMessagesHistory.append(message)
    log("ErinaTwitter",
        "New direct message from @" + str(message.message_create['sender_id']))
    if Twitter.dmAskingForSauce(message):
        StatsAppend(TwitterStats.directMessagingHit,
                    str(message.message_create['sender_id']))
        image = Twitter.getDirectMedia(message)
        if isAnError(image):
            ErinaTwitter.dm(
                "An error occurred while retrieving information on the anime",
                message.message_create['sender_id'])
        elif image is not None:
            searchResult = imageSearch(image)
            ErinaTwitter.dm(makeImageResponse(searchResult),
                            message.message_create['sender_id'])
        else:
            ErinaTwitter.dm(
                "You did not send any image along with your message",
                message.message_create['sender_id'])
Example #19
def saucenao_caching(image_hash):
    '''
    Caches the result from the given url\n
    Project Erina\n
    © Anime no Sekai - 2020
    '''
    try:
        log("ErinaCaches", f"Caching {str(image_hash)} SauceNAO data...")
        if str(config.Caches.keys.saucenao).replace(" ", "") not in ["None", ""]:
            saucenao_api = SauceNao(api_key=config.Caches.keys.saucenao, numres=1)
        else:
            saucenao_api = SauceNao(numres=1)
        if image_hash.has_url:
            try:
                api_results = saucenao_api.from_url(image_hash.url)[0]
            except:
                return CachingError("SAUCENAO_API_RESPONSE", "An error occured while retrieving SauceNAO API Data")
        else:
            try:
                api_results = saucenao_api.from_file(image_hash.ImageIO)[0]
            except:
                return CachingError("SAUCENAO_API_RESPONSE", "An error occured while retrieving SauceNAO API Data")
        
        StatsAppend(ExternalStats.saucenaoAPICalls)

        try:
            cache = saucenao.erina_from_api(api_results)
        except:
            traceback.print_exc()
            return CachingError("ERINA_CONVERSION", "An error occured while converting SauceNAO API Data to a caching format")
        try:
            TextFile(saucenao_cache_path + str(image_hash) + '.erina', blocking=False).write(cache)
        except:
            return CachingError("FILE_WRITE", "An error occured while writing out the cache data to a file")
        return saucenao_parser.SauceNAOCache(cache)
    except:
        return CachingError("UNKNOWN", "An unknown error occured while caching SauceNAO API Data")
Example #20
def ErinaServer_Endpoint_API_search():
    cooldown = None
    try:
        if not ServerConfig.public_api:
            if "key" not in request.values:
                return makeResponse(
                    request_args=request.values,
                    cooldown=None,
                    code=400,
                    error="NO_KEY",
                    error_message=
                    "This API is not public and no key got provided along with the request"
                )
            else:
                currentKey = request.values.get("key")
                if not isfile(erina_dir + "/ErinaServer/Erina/auth/apiAuth/" +
                              currentKey + ".erina"):
                    return makeResponse(
                        request_args=request.values,
                        cooldown=None,
                        code=401,
                        error="WRONG_KEY",
                        error_message="The given key isn't registered")
                else:
                    currentAuth = authReader.APIAuth(currentKey)
                    currentAuth.authFile.append(str(time()) + "\n")
                    if currentKey in rate_limiting_api_map:
                        rate = time() - rate_limiting_api_map[currentKey]
                        if rate > currentAuth.rate_limit:
                            rate_limiting_api_map[currentKey] = time()
                        else:
                            return makeResponse(
                                request_args=request.values,
                                cooldown=currentAuth.rate_limit - rate,
                                code=429,
                                error="RATE_LIMITED",
                                error_message=
                                "You have exceeded your rate limit")
                    else:
                        rate_limiting_api_map[currentKey] = time()
                    cooldown = currentAuth.rate_limit

        if "format" in request.values:
            format = request.values.get("format").lower()
        else:
            format = "json"

        if "anilistID" in request.values:
            StatsAppend(
                APIStats.searchEndpointCall,
                f"AniListID >>> {str(request.values.get('anilistID'))}")
            result = erinasearch.anilistIDSearch(
                request.values.get("anilistID"))
            if "client" in request.values:
                if request.values.get("client") == "line":
                    return makeResponse(
                        request_args=request.values,
                        cooldown=cooldown,
                        data=LineParser.makeInfoResponse(result))
                elif request.values.get("client") == "discord":
                    return makeResponse(
                        request_args=request.values,
                        cooldown=cooldown,
                        data=DiscordParser.makeInfoResponse(result)[2])

        elif "anime" in request.values:
            StatsAppend(
                APIStats.searchEndpointCall,
                f"Anime Search >>> {str(request.values.get('anime'))}")
            result = erinasearch.searchAnime(request.values.get("anime"))
            if "client" in request.values:
                if request.values.get("client") == "line":
                    return makeResponse(
                        request_args=request.values,
                        cooldown=cooldown,
                        data=LineParser.makeInfoResponse(result))
                elif request.values.get("client") == "discord":
                    return makeResponse(
                        request_args=request.values,
                        cooldown=cooldown,
                        data=DiscordParser.makeInfoResponse(result)[2])

        elif "image" in request.values:
            StatsAppend(APIStats.searchEndpointCall, "Image Search")
            result = erinasearch.imageSearch(request.values.get("image"))
            if "client" in request.values:
                if request.values.get("client") == "twitter":
                    return makeResponse(request_args=request.values,
                                        cooldown=cooldown,
                                        data=TwitterParser.makeTweet(result))
                elif request.values.get("client") == "line":
                    return makeResponse(
                        request_args=request.values,
                        cooldown=cooldown,
                        data=LineParser.makeImageResponse(result))
                elif request.values.get("client") == "discord":
                    return makeResponse(
                        request_args=request.values,
                        cooldown=cooldown,
                        data=DiscordParser.makeImageResponse(result)[2])
        else:
            return makeResponse(
                request_args=request.values,
                cooldown=cooldown,
                data={
                    "authorizedArgs": [
                        "anilistID", "anime", "image", "minify", "client",
                        "format"
                    ],
                    "optionalArgs": ["minify", "client", "format"]
                },
                code=400,
                error="MISSING_ARG",
                error_message="An argument is missing from your request")

        if not isAnError(result):
            if format == "text" or format == "html":
                return makeResponse(request_args=request.values,
                                    cooldown=cooldown,
                                    data=result.as_text())
            else:
                return makeResponse(request_args=request.values,
                                    cooldown=cooldown,
                                    data=result.as_dict())
        else:
            if result.type == "ANILIST_NOT_FOUND":
                return makeResponse(
                    request_args=request.values,
                    cooldown=cooldown,
                    code=404,
                    data={
                        "error": result.type,
                        "message": result.message,
                        "timestamp": result.timestamp,
                        "formattedTimestamp": result.formatted_timestamp
                    },
                    error="ANILIST_NOT_FOUND",
                    error_message="AniList could not find your anime")
            return makeResponse(
                request_args=request.values,
                cooldown=cooldown,
                data={
                    "error": result.type,
                    "message": result.message,
                    "timestamp": result.timestamp,
                    "formattedTimestamp": result.formatted_timestamp
                },
                code=500,
                error=result.type,
                error_message=
                "An error occured while retrieving the information")
    except:
        traceback.print_exc()
        return makeResponse(request_args=request.values,
                            cooldown=cooldown,
                            code=500,
                            error=str(exc_info()[0]))
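A hypothetical client call against this endpoint, assuming it is mounted at /api/search on a local ErinaServer instance (the actual route registration is not shown in this listing):

import requests

# request.values covers both query parameters and form data, so a plain GET works
response = requests.get(
    "http://localhost:5000/api/search",
    params={"key": "<api key>", "anime": "Cowboy Bebop", "format": "json"},
)
print(response.json())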
Example #21
def search_iqdb(image_hash, image_url='', file_io=None):
    """
    Searches and caches IQDB for anime/manga related images.

    Erina Project - 2020\n
    © Anime no Sekai
    """

    erina_log.logcaches('Searching for IQDB Data...', 'iqdb', str(image_hash))
    StatsAppend(ExternalStats.iqdbCalls, "New Call")
    results = {}

    ### If a file is given, send the file to iqdb.
    if file_io is not None:
        response = requests.post('https://iqdb.org/',
                                 files={'file': ('image_to_search', file_io)})
    else:
        if image_url == '':
            erina_log.logerror('[ErinaCaches] [IQDB] No file or URL provided')
            return {'error': 'no file or url provided'}
        else:
            response = requests.get(f'https://iqdb.org/?url={image_url}')

    ### If the image format is not supported by IQDB
    if 'Not an image or image format not supported' in response.text:
        print('Format not supported.')
        erina_log.logerror('[ErinaCaches] [IQDB] Format not supported')
        return {'error': 'format not supported'}

    ###### IQDB SCRAPING
    iqdb = BeautifulSoup(response.text, 'html.parser')

    ##### Search for the IQDB result
    try:
        tables = iqdb.find_all('table')
        search_result = tables[1].findChildren("th")[0].get_text()
    except Exception as e:
        erina_log.logerror(
            f'[ErinaCaches] [IQDB] Client Error, Error details: {str(e)}')
        return {'error': 'client error', 'error_details': e}

    ##### Verify if the result is relevant or not
    iqdb_tags = []
    if search_result == 'No relevant matches':
        erina_log.logerror('[ErinaCaches] [IQDB] No relevant matches found')
        return {'error': 'not found'}
    else:
        try:
            ### Getting the tags from IQDB
            alt_string = tables[1].findChildren("img")[0]['alt']
            iqdb_tags = alt_string.split('Tags: ')[1].split(' ')
        except:
            iqdb_tags = []

    #### Getting the Database URL from IQDB
    try:
        url = tables[1].find_all('td',
                                 attrs={'class': 'image'
                                        })[0].findChildren('a')[0]['href']
        url = 'https://' + url.split('//')[1]
    except:
        url = ''

    #### Getting the result image size
    try:
        size = tables[1].find_all('tr')[3].get_text().split(' [')[0]
    except:
        size = ''

    #### Getting the image rating (if it is NSFW or not)
    if tables[1].find_all('tr')[3].get_text().split()[1].replace(
            '[', '').replace(']', '').replace(' ', '') == 'Safe':
        is_safe = True
    else:
        is_safe = False

    #### Getting the similarity
    try:
        similarity = tables[1].find_all('tr')[4].get_text().replace(
            '% similarity', '')
    except:
        similarity = ''

    #### Adding the results to the main result variable
    results['iqdb_tags'] = iqdb_tags
    results['url'] = url
    results['size'] = size
    results['is_safe'] = is_safe
    results['similarity'] = similarity

    ############ DETECTING THE SOURCE DATABASE AND SCRAPING ITS RESULTS
    results['database'] = 'unknown'  # fallback so the cache lines below never hit a missing key
    if url.find('gelbooru.') != -1:
        results['database'] = 'gelbooru'
        results['gelbooru_results'] = search_gelbooru(url)

    elif url.find('danbooru.') != -1:
        results['database'] = 'danbooru'
        results['danbooru_results'] = search_danbooru(url)

    elif url.find('zerochan.') != -1:
        results['database'] = 'zerochan'
        results['zerochan_results'] = search_zerochan(url)

    elif url.find('konachan.') != -1:
        results['database'] = 'konachan'
        results['konachan_results'] = search_konachan(url)

    elif url.find('yande.re') != -1:
        results['database'] = 'yandere'
        results['yandere_results'] = search_yandere(url)

    elif url.find('anime-pictures.') != -1:
        results['database'] = 'anime_pictures'
        results['anime_pictures_results'] = search_animepictures(url)

    elif url.find('e-shuushuu') != -1:
        results['database'] = 'e_shuushuu'
        results['e_shuushuu_results'] = search_eshuushuu(url)

    #################### CACHING ##########

    new_cache_content = []
    new_cache_content.append('   --- IQDB CACHE ---   ')
    new_cache_content.append('')

    new_cache_content.append('IQDB Tags: ' +
                             create_erina_list(results['iqdb_tags']))
    new_cache_content.append('URL: ' + results['url'])
    new_cache_content.append('Size: ' + results['size'])
    new_cache_content.append('isSafe: ' + str(results['is_safe']))
    new_cache_content.append('Similarity: ' + results['similarity'])
    new_cache_content.append('Database: ' + results['database'])
    new_cache_content.append('')

    if results['database'] == 'gelbooru':

        new_cache_content.append(
            'Gelbooru Characters: ' +
            create_erina_list(results['gelbooru_results']['characters']))
        new_cache_content.append(
            'Gelbooru Copyrights: ' +
            create_erina_list(results['gelbooru_results']['copyrights']))
        new_cache_content.append(
            'Gelbooru Metadatas: ' +
            create_erina_list(results['gelbooru_results']['metadatas']))
        new_cache_content.append(
            'Gelbooru Tags: ' +
            create_erina_list(results['gelbooru_results']['tags']))

        new_cache_content.append('Gelbooru ID: ' +
                                 results['gelbooru_results']['id'])
        new_cache_content.append('Gelbooru Size: ' +
                                 results['gelbooru_results']['size'])
        new_cache_content.append('Gelbooru Source: ' +
                                 results['gelbooru_results']['source'])
        new_cache_content.append('Gelbooru Rating: ' +
                                 results['gelbooru_results']['rating'])
        new_cache_content.append('Gelbooru Date: ' +
                                 results['gelbooru_results']['date'])
        new_cache_content.append('Gelbooru Uploader: ' +
                                 results['gelbooru_results']['uploader'])
        new_cache_content.append('Gelbooru Score: ' +
                                 results['gelbooru_results']['score'])

    elif results['database'] == 'danbooru':

        new_cache_content.append(
            'Danbooru Artists: ' +
            create_erina_list(results['danbooru_results']['artists']))
        new_cache_content.append(
            'Danbooru Characters: ' +
            create_erina_list(results['danbooru_results']['characters']))
        new_cache_content.append(
            'Danbooru Copyrights: ' +
            create_erina_list(results['danbooru_results']['copyrights']))
        new_cache_content.append(
            'Danbooru Metadatas: ' +
            create_erina_list(results['danbooru_results']['metadatas']))
        new_cache_content.append(
            'Danbooru Tags: ' +
            create_erina_list(results['danbooru_results']['tags']))

        new_cache_content.append('Danbooru ID: ' +
                                 results['danbooru_results']['id'])
        new_cache_content.append('Danbooru Uploader: ' +
                                 results['danbooru_results']['uploader'])
        new_cache_content.append('Danbooru Date: ' +
                                 results['danbooru_results']['date'])
        new_cache_content.append('Danbooru Content Size: ' +
                                 results['danbooru_results']['content_size'])
        new_cache_content.append('Danbooru Format: ' +
                                 results['danbooru_results']['format'])
        new_cache_content.append('Danbooru Size: ' +
                                 results['danbooru_results']['size'])
        new_cache_content.append('Danbooru Source: ' +
                                 results['danbooru_results']['source'])
        new_cache_content.append('Danbooru Rating: ' +
                                 results['danbooru_results']['rating'])
        new_cache_content.append('Danbooru Score: ' +
                                 results['danbooru_results']['score'])
        new_cache_content.append('Danbooru Favorites: ' +
                                 results['danbooru_results']['favorites'])
        new_cache_content.append('Danbooru Status: ' +
                                 results['danbooru_results']['status'])

    elif results['database'] == 'zerochan':
        new_cache_content.append('Zerochan ID: ' +
                                 results['zerochan_results']['id'])
        new_cache_content.append('Zerochan Uploader: ' +
                                 results['zerochan_results']['uploader'])
        new_cache_content.append('Zerochan Content URL: ' +
                                 results['zerochan_results']['content_url'])
        new_cache_content.append('Zerochan Thumbnail: ' +
                                 results['zerochan_results']['thumbnail'])
        new_cache_content.append('Zerochan Format: ' +
                                 results['zerochan_results']['format'])
        new_cache_content.append('Zerochan Post Date: ' +
                                 results['zerochan_results']['post_date'])
        new_cache_content.append('Zerochan Name: ' +
                                 results['zerochan_results']['name'])
        new_cache_content.append('Zerochan Width: ' +
                                 results['zerochan_results']['width'])
        new_cache_content.append('Zerochan Height: ' +
                                 results['zerochan_results']['height'])
        new_cache_content.append('Zerochan Content Size: ' +
                                 results['zerochan_results']['content_size'])
        new_cache_content.append('Zerochan Mangaka: ' +
                                 results['zerochan_results']['mangaka'])
        new_cache_content.append('Zerochan Series: ' +
                                 results['zerochan_results']['series'])
        new_cache_content.append('Zerochan Character: ' +
                                 results['zerochan_results']['character'])
        new_cache_content.append('Zerochan Source: ' +
                                 results['zerochan_results']['source'])

    elif results['database'] == 'konachan':

        new_cache_content.append(
            'Konachan Copyrights: ' +
            create_erina_list(results['konachan_results']['copyrights']))
        new_cache_content.append(
            'Konachan Styles: ' +
            create_erina_list(results['konachan_results']['styles']))
        new_cache_content.append(
            'Konachan Artists: ' +
            create_erina_list(results['konachan_results']['artists']))
        new_cache_content.append(
            'Konachan Characters: ' +
            create_erina_list(results['konachan_results']['characters']))
        new_cache_content.append(
            'Konachan Tags: ' +
            create_erina_list(results['konachan_results']['tags']))
        new_cache_content.append(
            'Konachan Favorited By: ' +
            create_erina_list(results['konachan_results']['favorited_by']))

        new_cache_content.append('Konachan ID: ' +
                                 results['konachan_results']['id'])
        new_cache_content.append('Konachan Size: ' +
                                 results['konachan_results']['size'])
        new_cache_content.append('Konachan Source: ' +
                                 results['konachan_results']['source'])
        new_cache_content.append('Konachan Rating: ' +
                                 results['konachan_results']['rating'])
        new_cache_content.append('Konachan Date: ' +
                                 results['konachan_results']['date'])
        new_cache_content.append('Konachan Uploader: ' +
                                 results['konachan_results']['uploader'])
        new_cache_content.append('Konachan Score: ' +
                                 results['konachan_results']['score'])

    elif results['database'] == 'yandere':

        new_cache_content.append(
            'Yandere Copyrights: ' +
            create_erina_list(results['yandere_results']['copyrights']))
        new_cache_content.append(
            'Yandere Styles: ' +
            create_erina_list(results['yandere_results']['styles']))
        new_cache_content.append(
            'Yandere Artists: ' +
            create_erina_list(results['yandere_results']['artists']))
        new_cache_content.append(
            'Yandere Characters: ' +
            create_erina_list(results['yandere_results']['characters']))
        new_cache_content.append(
            'Yandere Tags: ' +
            create_erina_list(results['yandere_results']['tags']))
        new_cache_content.append(
            'Yandere Favorited By: ' +
            create_erina_list(results['yandere_results']['favorited_by']))

        new_cache_content.append('Yandere ID: ' +
                                 results['yandere_results']['id'])
        new_cache_content.append('Yandere Size: ' +
                                 results['yandere_results']['size'])
        new_cache_content.append('Yandere Source: ' +
                                 results['yandere_results']['source'])
        new_cache_content.append('Yandere Rating: ' +
                                 results['yandere_results']['rating'])
        new_cache_content.append('Yandere Date: ' +
                                 results['yandere_results']['date'])
        new_cache_content.append('Yandere Uploader: ' +
                                 results['yandere_results']['uploader'])
        new_cache_content.append('Yandere Score: ' +
                                 results['yandere_results']['score'])

    elif results['database'] == 'anime_pictures':

        new_cache_content.append('Anime-Pictures ID: ' +
                                 results['anime_pictures_results']['id'])
        new_cache_content.append('Anime-Pictures Uploader: ' +
                                 results['anime_pictures_results']['uploader'])
        new_cache_content.append(
            'Anime-Pictures Last Editing User: ' +
            results['anime_pictures_results']['last_editing_user'])
        new_cache_content.append(
            'Anime-Pictures Post Date: ' +
            results['anime_pictures_results']['post_date'])
        new_cache_content.append(
            'Anime-Pictures Published Date: ' +
            results['anime_pictures_results']['published_date'])
        new_cache_content.append(
            'Anime-Pictures Download Count: ' +
            results['anime_pictures_results']['download_count'])
        new_cache_content.append('Anime-Pictures Size: ' +
                                 results['anime_pictures_results']['size'])
        new_cache_content.append(
            'Anime-Pictures Aspect Ratio: ' +
            results['anime_pictures_results']['aspect_ratio'])
        new_cache_content.append(
            'Anime-Pictures Content Size: ' +
            results['anime_pictures_results']['content_size'])
        new_cache_content.append(
            'Anime-Pictures Artefacts Degree: ' +
            results['anime_pictures_results']['artefacts_degree'])
        new_cache_content.append(
            'Anime-Pictures Smooth Degree: ' +
            results['anime_pictures_results']['smoothness_degree'])
        new_cache_content.append(
            'Anime-Pictures Complexity: ' +
            results['anime_pictures_results']['complexity'])
        new_cache_content.append(
            'Anime-Pictures Copyright: ' +
            results['anime_pictures_results']['copyright'])
        new_cache_content.append('Anime-Pictures Artist: ' +
                                 results['anime_pictures_results']['artist'])
        new_cache_content.append(
            'Anime-Pictures Average Color: ' + create_erina_list(
                results['anime_pictures_results']['average_color']))
        new_cache_content.append(
            'Anime-Pictures References: ' +
            create_erina_list(results['anime_pictures_results']['references']))
        new_cache_content.append(
            'Anime-Pictures Objects: ' +
            create_erina_list(results['anime_pictures_results']['objects']))
        new_cache_content.append(
            'Anime-Pictures Similar Images: ' + create_erina_list(
                results['anime_pictures_results']['similar_images_id']))
        new_cache_content.append(
            'Anime-Pictures Artist Links: ' + create_erina_list(
                results['anime_pictures_results']['artist_links']))

    elif results['database'] == 'e_shuushuu':

        new_cache_content.append('E-Shuushuu Posting Uploader: ' +
                                 results['e_shuushuu_results']['uploader'])
        new_cache_content.append('E-Shuushuu Posting Post Date: ' +
                                 results['e_shuushuu_results']['post_date'])
        new_cache_content.append('E-Shuushuu Posting Filename: ' +
                                 results['e_shuushuu_results']['filename'])
        new_cache_content.append(
            'E-Shuushuu Posting Original Filename: ' +
            results['e_shuushuu_results']['original_filename'])
        new_cache_content.append('E-Shuushuu Posting Content Size: ' +
                                 results['e_shuushuu_results']['content_size'])
        new_cache_content.append('E-Shuushuu Posting Size: ' +
                                 results['e_shuushuu_results']['size'])
        new_cache_content.append('E-Shuushuu Posting Favorites: ' +
                                 results['e_shuushuu_results']['favorites'])
        new_cache_content.append('E-Shuushuu Posting Image Rating: ' +
                                 results['e_shuushuu_results']['image_rating'])

        new_cache_content.append(
            'E-Shuushuu Tags: ' +
            create_erina_list(results['e_shuushuu_results']['tags']))
        new_cache_content.append(
            'E-Shuushuu Sources: ' +
            create_erina_list(results['e_shuushuu_results']['sources']))
        new_cache_content.append(
            'E-Shuushuu Characters: ' +
            create_erina_list(results['e_shuushuu_results']['characters']))
        new_cache_content.append(
            'E-Shuushuu Artists: ' +
            create_erina_list(results['e_shuushuu_results']['artists']))

    new_cache_content.append('')
    new_cache_content.append('Cache Timestamp: ' +
                             str(datetime.timestamp(datetime.today())))
    new_cache_content.append('Cache Timestamp (formatted): ' + today() +
                             ' at ' + current_time())

    new_cache_destination = env_information.erina_dir + '/ErinaCaches/IQDB_Cache/'
    new_cache_filename = str(image_hash) + '.erina'
    erina_log.logcaches(f'Caching IQDB and {results["database"]} data...')
    write_file(file_title=new_cache_filename,
               text=new_cache_content,
               destination=new_cache_destination)
    return results
Example #22
def iqdb_caching(image_hash):
    """
    Searches and caches IQDB for anime/manga related images.

    Erina Project - 2020\n
    © Anime no Sekai
    """
    try:
        log("ErinaCaches", 'Searching for IQDB Data...')
        
        ### If a file is given, send the file to iqdb.
        try:
            if image_hash.has_url:
                IQDBresponse = requests.get(f'https://iqdb.org/?url={image_hash.url}')
                StatsAppend(ExternalStats.iqdbCalls, "New Call")
            else:
                IQDBresponse = requests.post('https://iqdb.org/', files={'file': ('image_to_search',  image_hash.ImageIO) })
                StatsAppend(ExternalStats.iqdbCalls, "New Call")
        except:
            return CachingError("IQDB_RESPONSE", "An error occured while retrieving IQDB Data")

        ### If the image format is not supported by IQDB
        if 'Not an image or image format not supported' in IQDBresponse.text:
            return CachingError("IQDB_FORMAT_NOT_SUPPORTED", "The given image's format is not supported by IQDB")


    ###### IQDB SCRAPING
        try:
            iqdb = BeautifulSoup(IQDBresponse.text, 'html.parser')

        ##### Search for the IQDB result
            try:
                tables = iqdb.find_all('table')
                search_result = tables[1].findChildren("th")[0].get_text()
            except:
                return CachingError("IQDB_CLIENT_ERROR", f"An error occured while searching for the results: {exc_info()[0]}")

        ##### Verify if the result is relevant or not
            iqdb_tags = []
            if search_result == 'No relevant matches':
                return CachingError("IQDB_NO_RELEVANT_MATCH", "No relevant matches was found with IQDB", no_log=True)
            else:
                try:
                    ### Getting the tags from IQDB
                    alt_string = tables[1].findChildren("img")[0]['alt']
                    iqdb_tags = alt_string.split('Tags: ')[1].split(' ')
                except:
                    iqdb_tags = []
            
            #### Getting the Database URL from IQDB
            try:
                url = tables[1].find_all('td', attrs={'class': 'image'})[0].findChildren('a')[0]['href']
                url = 'https://' + url.split('//')[1]
            except:
                url = 'No URL'

            #### Getting the result image size
            try:
                size = tables[1].find_all('tr')[3].get_text().split(' [')[0]
            except:
                size = 'Unknown'

            #### Getting the image rating (if it is NSFW or not) 
            if tables[1].find_all('tr')[3].get_text().split()[1].replace('[', '').replace(']', '').replace(' ', '') == 'Safe':
                is_safe = True
            else:
                is_safe = False

            #### Getting the similarity
            try:
                similarity = tables[1].find_all('tr')[4].get_text().replace('% similarity', '')
            except:
                similarity = '0'


        ############ FUNCTION DEFINITION FOR RESULTS SCRAPING
            database = "Unknown"
            if url.find('gelbooru.') != -1:
                database = 'Gelbooru'
            
            elif url.find('danbooru.') != -1:
                database = 'Danbooru'

            elif url.find('zerochan.') != -1:
                database = 'Zerochan'

            elif url.find('konachan.') != -1:
                database = 'Konachan'

            elif url.find('yande.re') != -1:
                database = 'Yande.re'

            elif url.find('anime-pictures.') != -1:
                database = 'Anime-Pictures'

            elif url.find('e-shuushuu') != -1:
                database = 'E-Shuushuu'

            title = "Unknown"
            try:
                databaseWebsiteData = requests.get(url).text
                databaseWebsite = BeautifulSoup(databaseWebsiteData.text, 'html.parser')
                title = databaseWebsite.find("title").get_text()
            except:
                title = "Unkown"
        except:
            return CachingError("IQDB_PARSING", "An error occured while parsing the data from IQDB")

        try:
            #### Adding the results to the main result variable
            newCacheFile = TextFile(erina_dir + "/ErinaCaches/IQDB_Cache/" + str(image_hash) + ".erina")
            newCacheFile.append("   --- IQDB CACHE ---   \n")
            newCacheFile.append('\n')
            
            newCacheFile.append('IQDB Tags: ' + ":::".join(iqdb_tags) + "\n")
            newCacheFile.append('URL: ' + str(url) + "\n")
            newCacheFile.append('Title: ' + str(title) + "\n")
            newCacheFile.append('Size: ' + str(size)  + "\n")
            newCacheFile.append('isSafe: ' + str(is_safe) + "\n")
            newCacheFile.append('Similarity: ' + str(similarity) + "\n")
            newCacheFile.append('Database: ' + str(database) + "\n")
            return iqdb_parser.IQDBCache(newCacheFile.read())
        except:
            return CachingError("FILE_WRITE", f"An error occured while writing out the cache data to a file")
    except:
        return CachingError("UNKNOWN", "An unknown error occured while caching IQDB Data")
Example #23
    def __init__(self, data) -> None:
        StatsAppend(erina.erinaParsingCount, "trace.moe")
        # Normalize to get the same type of data every time
        if isinstance(data, list):
            data = "\n".join(data)
        else:
            data = str(data)

        self.data = data.split("\n")

        self.anilist_id = None
        self.myanimelist_id = None
        self.title = None
        self.season = None
        self.episode = None
        self.hentai = None
        self.filename = None
        self.timing = None
        self.similarity = None
        self.tokenthumb = None
        self.cache_timestamp = None

        for element in self.data:
            element = str(element).replace("\n", "")

            if element[:11] == 'AniList ID:':
                self.anilist_id = utils.convert_to_int(element[12:])
            elif element[:15] == 'MyAnimeList ID:':
                self.myanimelist_id = utils.convert_to_int(element[16:])

            elif element[:6] == 'Title:':
                self.title = self.AnimeTitle(romaji_title=element[7:])
            elif element[:13] == 'Title Native:':
                if self.title is not None:
                    self.title.addTitle(native_title=element[14:])
                else:
                    self.title = self.AnimeTitle(native_title=element[14:])
            elif element[:14] == 'Title Chinese:':
                if self.title is not None:
                    self.title.addTitle(chinese_title=element[15:])
                else:
                    self.title = self.AnimeTitle(chinese_title=element[15:])
            elif element[:14] == 'Title English:':
                if self.title is not None:
                    self.title.addTitle(english_title=element[15:])
                else:
                    self.title = self.AnimeTitle(english_title=element[15:])
            elif element[:13] == 'Title Romaji:':
                if self.title is not None:
                    self.title.addTitle(romaji_title=element[14:])
                else:
                    self.title = self.AnimeTitle(romaji_title=element[14:])
            elif element[:6] == 'Anime:':
                if self.title is not None:
                    self.title.addAlternativeTitle(element[7:])
                else:
                    self.title = self.AnimeTitle(
                        alternative_titles=element[7:])
            elif element[:9] == 'Synonyms:':
                if self.title is not None:
                    self.title.addAlternativeTitle(element[10:].split(':::'))
                else:
                    self.title = self.AnimeTitle(
                        alternative_titles=element[10:].split(':::'))
            elif element[:17] == 'Synonyms Chinese:':
                if self.title is not None:
                    self.title.addAlternativeTitle(element[18:].split(':::'))
                else:
                    self.title = self.AnimeTitle(
                        alternative_titles=element[18:].split(':::'))
            elif element[:7] == 'Season:':
                self.season = str(element[8:])
            elif element[:8] == 'Episode:':
                self.episode = utils.convert_to_int(element[9:])
            elif element[:8] == 'isAdult:':
                self.hentai = utils.convert_to_boolean(element[9:])
            elif element[:9] == 'Filename:':
                self.filename = str(element[10:])
            elif element[:5] == 'From:':
                if self.timing is None:
                    self.timing = self.Timing(from_time=element[6:])
                else:
                    self.timing.addTiming(from_time=element[6:])
            elif element[:3] == 'To:':
                if self.timing is None:
                    self.timing = self.Timing(to=element[4:])
                else:
                    self.timing.addTiming(to=element[4:])
            elif element[:3] == 'At:':
                if self.timing is None:
                    self.timing = self.Timing(at=element[4:])
                else:
                    self.timing.addTiming(at=element[4:])
            elif element[:11] == 'Similarity:':
                self.similarity = utils.convert_to_float(element[12:])
            elif element[:11] == 'TokenThumb:':
                self.tokenthumb = str(element[12:])
            elif element[:16] == 'Cache Timestamp:':
                self.cache_timestamp = self.CacheTimestamp(element[17:])
Example #24
    def __init__(self, data) -> None:
        StatsAppend(erina.erinaParsingCount, "AniList")
        # Normalize to get the same type of data every time
        if isinstance(data, list):
            data = "\n".join(data)
        else:
            data = str(data)

        self.data = data.split("\n")
        episode_fallback = 0

        ### Data initialization
        self.anilist_id = None
        self.myanimelist_id = None
        self.title = None
        self.type = None
        self.format = None
        self.status = None
        self.description = None
        self.season = None
        self.year = None
        self.number_of_episodes = None
        self.episode_duration = None
        self.first_episode_release_date = None
        self.last_episode_release_date = None
        self.country = None
        self.source_type = None
        self.licensed = None
        self.hentai = None
        self.twitter_hashtag = None
        self.average_score = None
        self.cover_image = None
        self.average_cover_color = None
        self.banner_image = None
        self.trailer = None
        self.genres = None
        self.studios = None
        self.tags = None
        self.relations = None
        self.characters = None
        self.staff = None
        self.recommendations = None
        self.link = None
        self.streaming_links = None
        self.external_links = None
        self.cache_timestamp = None

        for element in self.data:
            element = str(element).replace("\n", "")
            #print(element)

            if element[:11] == "AniList ID:":
                self.anilist_id = utils.convert_to_int(element[12:])
            elif element[:15] == "MyAnimeList ID:":
                self.myanimelist_id = utils.convert_to_int(element[16:])
            elif element[:13] == 'Romaji Title:':
                if self.title is None:
                    self.title = self.AnimeTitle(element[14:])
                else:
                    self.title.addTitle(romaji_title=element[14:])
            elif element[:14] == 'English Title:':
                if self.title is None:
                    self.title = self.AnimeTitle(english_title=element[15:])
                else:
                    self.title.addTitle(english_title=element[15:])
            elif element[:13] == 'Native Title:':
                if self.title is None:
                    self.title = self.AnimeTitle(native_title=element[14:])
                else:
                    self.title.addTitle(native_title=element[14:])
            elif element[:21] == 'Alternative Title(s):':
                if self.title is None:
                    self.title = self.AnimeTitle(
                        alternative_titles=element[22:].split(':::'))
                else:
                    self.title.addAlternativeTitle(element[22:].split(':::'))
            elif element[:5] == 'Type:':
                self.type = str(element[6:])
            elif element[:7] == 'Format:':
                self.format = str(element[8:])
            elif element[:7] == 'Status:':
                self.status = str(element[8:])
            elif element[:12] == 'Description:':
                self.description = self.AnimeDescription(element[13:])
            elif element[:7] == 'Season:':
                self.season = str(element[8:])
            elif element[:5] == 'Year:':
                self.year = utils.convert_to_int(element[6:])
            elif element[:9] == 'Episodes:':
                self.number_of_episodes = utils.convert_to_int(element[10:])
            elif element[:17] == 'Average Duration:':
                self.episode_duration = utils.convert_to_int(element[18:])
            elif element[:27] == 'First Episode Release Date:':
                self.first_episode_release_date = self.AnimeDate(element[28:])
            elif element[:26] == 'Last Episode Release Date:':
                self.last_episode_release_date = self.AnimeDate(element[27:])
            elif element[:8] == 'Country:':
                self.country = str(element[9:])
            elif element[:18] == 'Source Media Type:':
                self.source_type = str(element[19:])
            elif element[:9] == 'Licensed?':
                self.licensed = utils.convert_to_boolean(element[10:])
            elif element[:7] == 'Hentai?':
                self.hentai = utils.convert_to_boolean(element[8:])
            elif element[:16] == 'Twitter Hashtag:':
                self.twitter_hashtag = str(element[17:])
            elif element[:14] == 'Average Score:':
                self.average_score = utils.convert_to_int(element[15:])
            elif element[:12] == 'Cover Image:':
                self.cover_image = str(element[13:])
            elif element[:20] == 'Average Cover Color:':
                self.average_cover_color = str(element[21:])
            elif element[:13] == 'Banner Image:':
                self.banner_image = str(element[14:])
            elif element[:8] == 'Trailer:':
                self.trailer = str(element[9:])
            elif element[:7] == 'Genres:':
                self.genres = self.AnimeGenres(element[8:].split(':::'))
            elif element[:8] == '[STUDIO]':
                if self.studios is None:
                    self.studios = [self.AnimeStudio(element[9:].split('|||'))]
                else:
                    self.studios.append(
                        self.AnimeStudio(element[9:].split('|||')))
            elif element[:5] == '[TAG]':
                if self.tags is None:
                    self.tags = [self.AnimeTag(element[6:].split('|||'))]
                else:
                    self.tags.append(self.AnimeTag(element[6:].split('|||')))
            elif element[:10] == '[RELATION]':
                if self.relations is None:
                    self.relations = [
                        self.AnimeRelation(element[11:].split('|||'))
                    ]
                else:
                    self.relations.append(
                        self.AnimeRelation(element[11:].split('|||')))
            elif element[:11] == '[CHARACTER]':
                if self.characters is None:
                    self.characters = [
                        self.AnimeCharacter(element[12:].split('|||'))
                    ]
                else:
                    self.characters.append(
                        self.AnimeCharacter(element[12:].split('|||')))
            elif element[:7] == '[STAFF]':
                if self.staff is None:
                    self.staff = [self.AnimeStaff(element[8:].split('|||'))]
                else:
                    self.staff.append(self.AnimeStaff(
                        element[8:].split('|||')))
            elif element[:16] == '[RECOMMENDATION]':
                if self.recommendations is None:
                    self.recommendations = [
                        self.AnimeRecommendation(element[17:].split('|||'))
                    ]
                else:
                    self.recommendations.append(
                        self.AnimeRecommendation(element[17:].split('|||')))
            elif element[:16] == '[streaming link]':
                episode_fallback += 1
                # Info extraction
                try:
                    element = element[17:]
                    link = re.findall(r"(https?://\S+)", element)[0]
                    if link.find('www.crunchyroll.com') != -1:
                        element = element.split(": http")[0].split(" - ")
                        episode = utils.convert_to_int(
                            element[0].lower().replace("episode", ""))
                        title = str(element[1])
                    else:
                        element = element.split(": http")[0]
                        episode = episode_fallback + 1
                        title = str(element[0])

                    # Appending results
                    if self.streaming_links is None:
                        self.streaming_links = [
                            self.AnimeStreamingLink(link=link,
                                                    episode=episode,
                                                    title=title)
                        ]
                    else:
                        self.streaming_links.append(
                            self.AnimeStreamingLink(link=link,
                                                    episode=episode,
                                                    title=title))
                except Exception:
                    pass  # Malformed streaming link line, skip it

            elif element[:15] == '[external link]':
                element = element[16:]
                link = re.findall(r"(https?://\S+)", element)[0]
                site = element.split(": ")[0]
                if self.external_links is None:
                    self.external_links = [
                        self.AnimeExternalLink(link=link, site=site)
                    ]
                else:
                    self.external_links.append(
                        self.AnimeExternalLink(link=link, site=site))
            elif element[:16] == 'Cache Timestamp:':
                self.cache_timestamp = self.CacheTimestamp(
                    utils.convert_to_float(element[17:]))
        if self.anilist_id is not None:
            self.link = "https://anilist.co/anime/" + str(self.anilist_id)
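The `[streaming link]` branch above first pulls the URL out with a regex, then derives the episode number and title from the text before `: http`. A standalone sketch of that extraction, with an assumed input line (the exact format comes from the cache files, not shown here):

import re

# Assumed cache line format: "Episode N - Title: https://..."
line = "Episode 3 - Yakusoku no Neverland: https://www.crunchyroll.com/watch/123"
link = re.findall(r"(https?://\S+)", line)[0]
label = line.split(": http")[0]  # "Episode 3 - Yakusoku no Neverland"
episode_text, _, title = label.partition(" - ")
episode = int(episode_text.lower().replace("episode", "").strip())
assert (episode, title) == (3, "Yakusoku no Neverland")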
Example #25
async def on_message(message):
    '''
    When the bot receives a message
    '''
    #await message.add_reaction(roger_reaction)  # React to show that the bot has understood the command
    if message.author.id == client.user.id:
        return
    if message.content.startswith('.erina'):  # ERINA SPECIFIC COMMAND
        userCommand = utils.removeSpaceBefore(str(message.content)[6:])
        commandLength = len(userCommand.split(" ")[0])
        command, commandSimilarity = searchCommand(
            userCommand.split(" ")[0].lower())
        if commandSimilarity < 0.75:
            await message.channel.send("Sorry, this command is not available.")
            return
        else:
            if command == "search":
                query = utils.removeSpaceBefore(userCommand[commandLength:])
                log(
                    "ErinaDiscord", "New info hit from @" +
                    str(message.author) + " (asking for " + str(query) + ")")
                StatsAppend(DiscordStats.infoHit,
                            f"{str(query)} >>> {str(message.author)}")
                anime, thumbnail, discordResponse = Parser.makeInfoResponse(
                    erinasearch.searchAnime(query))
                if discordResponse is not None:
                    newEmbed = discord.Embed(title='Anime Info',
                                             colour=discord.Colour.blue())
                    newEmbed.add_field(name=anime.capitalize(),
                                       value=discordResponse)

                    if thumbnail is not None:
                        newEmbed.set_thumbnail(url=thumbnail)

                    await message.channel.send(embed=newEmbed)
                else:
                    await message.channel.send(
                        "An error occurred while searching for your anime: " +
                        query)
            elif command == "description":
                query = utils.removeSpaceBefore(userCommand[commandLength:])
                log(
                    "ErinaDiscord", "New description hit from @" +
                    str(message.author) + " (asking for " + str(query) + ")")
                StatsAppend(DiscordStats.descriptionHit,
                            f"{str(query)} >>> {str(message.author)}")
                anime, thumbnail, discordResponse = Parser.makeDescriptionResponse(
                    erinasearch.searchAnime(query))
                if discordResponse is not None:
                    newEmbed = discord.Embed(
                        title=f'Anime Description: {str(anime)}',
                        colour=discord.Colour.blue())
                    newEmbed.add_field(name=anime.capitalize(),
                                       value=discordResponse)

                    if thumbnail is not None:
                        newEmbed.set_thumbnail(url=thumbnail)

                    await message.channel.send(embed=newEmbed)
                else:
                    await message.channel.send(
                        "An error occurred while searching for your anime: " +
                        query)
            elif command == "dev":
                await StaticResponse.erinadev(message.channel)
            elif command == "donate":
                await StaticResponse.erinadonate(message.channel)
            elif command == "help":
                await StaticResponse.erinahelp(message.channel, message.author)
            elif command == "stats":
                await StaticResponse.erinastats(message.channel, client)
            elif command == "invite":
                await StaticResponse.erinainvite(message.channel)
    else:
        # Use the Discord-specific flags when configured, else the global ones
        flags = (config.Discord.flags
                 if str(config.Discord.flags).replace(" ", "") not in
                 ["None", "", "[]"] else config.Erina.flags)
        if any(flag in str(message.content).lower() for flag in flags):
            listOfResults = []
            #await message.add_reaction(roger_reaction)  # React to show that the bot has understood the command
            log("ErinaDiscord",
                "New image search from @" + str(message.author))
            StatsAppend(DiscordStats.imageSearchHit, f"{str(message.author)}")
            for file in message.attachments:
                if filecenter.type_from_extension(
                        filecenter.extension_from_base(file.filename)
                ) == 'Image':  # If the file is an image
                    current_anime = Parser.makeImageResponse(
                        erinasearch.imageSearch(
                            file.url))  # Get info about the anime
                    listOfResults.append(
                        current_anime)  # Append to the results list
            if not listOfResults:  # No image attached, look through recent messages
                message_history = await message.channel.history(
                    limit=5
                ).flatten()  # Search the last 5 messages for a picture
                for past_message in message_history:
                    for file in past_message.attachments:
                        if filecenter.type_from_extension(
                                filecenter.extension_from_base(file.filename)
                        ) == 'Image':  # If the file is an image
                            current_anime = Parser.makeImageResponse(
                                erinasearch.imageSearch(
                                    file.url))  # Get info about the anime
                            listOfResults.append(
                                current_anime)  # Append to the results list

            if len(listOfResults) == 0:
                await message.channel.send("Sorry, I couldn't find anything..."
                                           )

            elif len(listOfResults) == 1:
                title, thumbnail, reply = listOfResults[0]
                if reply is None:
                    await message.channel.send(
                        "An error occurred while retrieving information on the anime..."
                    )
                    return

                await message.channel.send(f"It seems to be {title}!")

                newEmbed = discord.Embed(title='Anime Info',
                                         colour=discord.Colour.blue())
                newEmbed.add_field(name=title.capitalize(), value=reply)

                if thumbnail is not None:
                    newEmbed.set_thumbnail(url=thumbnail)

                await message.channel.send(embed=newEmbed)
                await asyncio.sleep(1)

            else:
                for iteration, result in enumerate(listOfResults):
                    if iteration == 0:
                        number = '1st'
                    elif iteration == 1:
                        number = '2nd'
                    elif iteration == 2:
                        number = '3rd'
                    else:
                        number = f'{iteration + 1}th'  # enumerate is zero-based

                    title, thumbnail, reply = result
                    if reply is None:
                        await message.channel.send(
                            f"An error occurred while searching for the {number} anime"
                        )
                        continue

                    await message.channel.send(
                        f"The {number} anime seems to be {title}!")

                    newEmbed = discord.Embed(title='Anime Info',
                                             colour=discord.Colour.blue())
                    newEmbed.add_field(name=title.capitalize(), value=reply)

                    if thumbnail is not None:
                        newEmbed.set_thumbnail(url=thumbnail)

                    await message.channel.send(embed=newEmbed)
                    await asyncio.sleep(1)
    return
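The multi-result branch above builds its ordinal labels ('1st', '2nd', '3rd', 'Nth') with an inline if/elif chain. A small hypothetical helper that generalizes it, including the 11th-13th exceptions the inline version would get wrong:

def ordinal(n: int) -> str:
    # 11, 12 and 13 take "th" despite ending in 1, 2, 3
    if 10 <= n % 100 <= 13:
        return f"{n}th"
    suffix = {1: "st", 2: "nd", 3: "rd"}.get(n % 10, "th")
    return f"{n}{suffix}"

assert [ordinal(n) for n in (1, 2, 3, 4, 11, 21)] == \
    ["1st", "2nd", "3rd", "4th", "11th", "21st"]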
Example #26
    def __init__(self, data) -> None:
        StatsAppend(erina.erinaParsingCount, "ErinaDB")
        # Normalize so we always work on the same type of data
        if isinstance(data, list):
            data = "\n".join(data)
        else:
            data = str(data)

        self.data = data.split("\n")

        self.anilist_id = None
        self.title = None
        self.season = None
        self.episode = None
        self.first_frame = None
        self.last_frame = None
        self.timing = None
        self.hash = None
        self.hash_algorithm = None
        self.filename = None
        self.framerate = None
        self.episode_duration = None
        self.episode_framecount = None
        self.analyze_timestamp = None
        
        for element in self.data:
            element = str(element).replace("\n", "")
            if element[:11] == 'AniList ID:':
                self.anilist_id = utils.convert_to_int(element[12:])
            elif element[:6] == 'Anime:':
                self.title = self.AnimeTitle(romaji_title=element[7:])
            elif element[:7] == 'Season:':
                self.season = utils.convert_to_int(element[8:])
            elif element[:8] == 'Episode:':
                self.episode = utils.convert_to_int(element[9:])
            elif element[:12] == 'First Frame:':
                self.first_frame = utils.convert_to_int(element[13:]) 
            elif element[:11] == 'Last Frame:':
                self.last_frame = utils.convert_to_int(element[12:])
            elif element[:5] == 'From:':
                if self.timing is None:
                    self.timing = self.Timing(from_time=element[6:])
                else:
                    self.timing.addTiming(from_time=element[6:])
            elif element[:3] == 'To:':
                if self.timing is None:
                    self.timing = self.Timing(to=element[4:])
                else:
                    self.timing.addTiming(to=element[4:])
            elif element[:3] == 'At:':
                if self.timing is None:
                    self.timing = self.Timing(at=element[4:])
                else:
                    self.timing.addTiming(at=element[4:])
            elif element[:5] == 'Hash:':
                self.hash = str(element[6:])
            elif element[:18] == 'Hashing Algorithm:':
                self.hash_algorithm = str(element[19:])
            elif element[:9] == 'Filename:':
                self.filename = str(element[10:])
            elif element[:18] == 'Episode Framerate:':
                self.framerate = utils.convert_to_float(element[19:])
            elif element[:17] == 'Episode Duration:':
                self.episode_duration = utils.convert_to_float(element[18:])
            elif element[:20] == 'Episode Frame Count:':
                self.episode_framecount = utils.convert_to_int(element[21:])
            elif element[:13] == 'Analyze Date:':
                self.analyze_timestamp = self.AnalyzeTimestamp(element[14:])
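This parser repeats the prefix-and-offset pattern for every field. A hypothetical table-driven refactor (not the actual ErinaFile parser) maps each prefix to a field name and a converter, so adding a field is one dictionary entry and no slice offset can be miscounted:

FIELDS = {
    "AniList ID:": ("anilist_id", int),
    "Season:": ("season", int),
    "Episode:": ("episode", int),
    "Hash:": ("hash", str),
    "Filename:": ("filename", str),
}

def parse_erina_lines(lines):
    record = {}
    for line in lines:
        for prefix, (field, cast) in FIELDS.items():
            if line.startswith(prefix):
                record[field] = cast(line[len(prefix):].strip())
                break
    return record

assert parse_erina_lines(["AniList ID: 101759", "Episode: 1"]) == \
    {"anilist_id": 101759, "episode": 1}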
Example #27
    def on_status(self, tweet, force=False):
        """
        Tweet Receiving
        """
        global sinceID
        StatsAppend(TwitterStats.streamHit)
        if TwitterConfig.ignore_rt and Twitter.isRetweet(tweet):
            return
        try:
            if Twitter.isReplyingToErina(
                    tweet
            ):  # If replying, analyze if it is a positive or a negative feedback
                responseSentiment = sentiment(tweet.text)[0]
                StatsAppend(TwitterStats.responsePolarity, responseSentiment)
                latestResponses.append({
                    "timestamp": time(),
                    "user": tweet.user.screen_name,
                    "text": tweet.text,
                    "sentiment": responseSentiment,
                    "url": "https://twitter.com/twitter/statuses/" + str(tweet.id),
                })
        except Exception:
            traceback.print_exc()

        if isinstance(TwitterConfig.monitoring.accounts, (list, tuple)) \
                and len(TwitterConfig.monitoring.accounts) > 0:
            if TwitterConfig.monitoring.check_replies and Twitter.isReplyingToErina(
                    tweet):  # Monitor Mode ON, Check Replies to Monitored ON
                log("ErinaTwitter",
                    "New monitoring hit from @" + str(tweet.user.screen_name))
                StatsAppend(TwitterStats.askingHit,
                            str(tweet.user.screen_name))
                imageURL = Twitter.findImage(tweet)
                if imageURL is None:
                    imageURL = Twitter.findParentImage(tweet)
                if imageURL is not None:
                    searchResult = imageSearch(imageURL)
                    tweetResponse = makeTweet(searchResult)
                    if tweetResponse is not None:
                        StatsAppend(TwitterStats.responses)
                        ErinaTwitter.tweet(tweetResponse, replyID=tweet.id)
            elif tweet.user.screen_name in TwitterConfig.monitoring.accounts:  # Monitor Mode ON, Check Replies to Monitored OFF
                log("ErinaTwitter", "New monitoring hit")
                StatsAppend(TwitterStats.askingHit,
                            str(tweet.user.screen_name))
                imageURL = Twitter.findImage(tweet)
                if imageURL is not None:
                    searchResult = imageSearch(imageURL)
                    tweetResponse = makeTweet(searchResult)
                    if tweetResponse is not None:
                        StatsAppend(TwitterStats.responses)
                        ErinaTwitter.tweet(tweetResponse, replyID=tweet.id)

        else:  # Monitor Mode OFF, Public Account
            imageURL = Twitter.findImage(tweet)
            if imageURL is None:
                imageURL = Twitter.findParentImage(tweet)
            if imageURL is not None and (Twitter.isAskingForSauce(tweet)
                                         or force):
                log("ErinaTwitter",
                    "New asking hit from @" + str(tweet.user.screen_name))
                StatsAppend(TwitterStats.askingHit,
                            str(tweet.user.screen_name))
                searchResult = imageSearch(imageURL)
                tweetResponse = makeTweet(searchResult)
                if tweetResponse is not None:
                    StatsAppend(TwitterStats.responses)
                    responseImageURL = None
                    if isinstance(searchResult.detectionResult, TraceMOECache):
                        if TwitterConfig.image_preview:
                            if not searchResult.detectionResult.hentai:
                                responseImageURL = f"https://trace.moe/thumbnail.php?anilist_id={str(searchResult.detectionResult.anilist_id)}&file={str(searchResult.detectionResult.filename)}&t={str(searchResult.detectionResult.timing.at)}&token={str(searchResult.detectionResult.tokenthumb)}"
                    ErinaTwitter.tweet(tweetResponse,
                                       replyID=tweet.id,
                                       imageURL=responseImageURL)
                elif Twitter.isMention(tweet):
                    ErinaTwitter.tweet(
                        "Sorry, I searched everywhere but coudln't find it...",
                        replyID=tweet.id)
        TextFile(erina_dir + "/ErinaTwitter/lastStatusID.erina").write(
            str(tweet.id))
        sinceID = tweet.id
        return
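One fix above deserves a note: Python's `and` binds tighter than `or`, so the original condition `imageURL is not None and Twitter.isAskingForSauce(tweet) or force` parsed as `(A and B) or force` and would have run an image search with `imageURL = None` whenever `force` was set. A minimal demonstration of the difference:

imageURL, asking, force = None, False, True

# Original grouping: force alone satisfies the condition, even with no image
assert (imageURL is not None and asking or force)

# Fixed grouping: an image is always required
assert not (imageURL is not None and (asking or force))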