Example 1
    async def search(app: AppContext, query: str) -> List[SearchResult]:
        """
        Searches for a query on nyaa.si.

        Parameters
        ----------
        query: str
            The search query.
        """
        url = NyaaSearcher._get_query_url(query)
        loop = asyncio.get_running_loop()

        feed = await loop.run_in_executor(None, feedparser.parse, url)
        found = []
        for item in feed["entries"]:
            try:
                anitopy.parse(item["title"])
            except Exception:
                logger.warning(
                    f"Anitopy - Could not parse '{item['title']}', skipping")
                continue

            found.append(SearchResult.from_dict(app, item))

        return found
Example 2
def parse_from_content(content: "content_type",
                       *,
                       name_processor: Callable[[content_type],
                                                str] = lambda x: x,
                       stream_url_processor: Callable[[content_type],
                                                      str] = lambda x: x,
                       overrides: Dict = {},
                       episode_parsed: bool = False):

    anitopy_result = anitopy.parse(name_processor(content))

    returnee = {"stream_url": stream_url_processor(content)}
    video_res = anitopy_result.get("video_resolution") or ""

    if not episode_parsed:
        returnee.update(
            {"episode": int(anitopy_result.get("episode_number", 0) or 0)})

    if isinstance(video_res, str):
        stripped = video_res.strip("p")
        if stripped.isdigit():
            returnee.update({"quality": int(stripped)})

    returnee.update(overrides)

    return returnee
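A minimal usage sketch for parse_from_content; the item dict and both processors below are hypothetical stand-ins for whatever content_type the caller actually uses:

# Hypothetical feed item; real callers supply their own content_type.
item = {
    "name": "[SubsPlease] Spy x Family - 05 (1080p).mkv",
    "url": "https://example.com/stream/12345",
}

result = parse_from_content(
    item,
    name_processor=lambda x: x["name"],
    stream_url_processor=lambda x: x["url"],
)
# Expected shape (exact values depend on the anitopy version):
# {'stream_url': 'https://example.com/stream/12345', 'episode': 5, 'quality': 1080}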
Example 3
    def get_episode_number(self, file_name: str) -> Optional[int]:
        """
        Using the `file_name` argument, you must parse
        the file name in order to get the episode of the release.

        Failure to do so will result in incorrect matching.

        Returning `None` will stop the matching.
        """
        parsed = anitopy.parse(file_name)

        if "anime_type" in parsed.keys():
            return

        extra_info = parsed.get("release_information", "")
        if "batch" in extra_info.lower():
            return

        try:
            episode = parsed["episode_number"]
        except KeyError:
            return

        try:
            return int(episode)
        except (ValueError, TypeError):
            return
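For reference, a sketch of the dictionary anitopy typically returns for a conventional release name; keys it cannot detect are simply absent, and episode_number comes back as a string (or a list for multi-episode names), which is why the guards above are needed:

parsed = anitopy.parse('[Judas] Vinland Saga - S01E01.mkv')
# Typical result (exact contents depend on the anitopy version):
# {'anime_season': '01',
#  'anime_title': 'Vinland Saga',
#  'episode_number': '01',
#  'file_extension': 'mkv',
#  'file_name': '[Judas] Vinland Saga - S01E01.mkv',
#  'release_group': 'Judas'}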
Example 4
    def _load_show(self, args, episode):
        """
        Gets the show name from the ArgumentHandler, or finds it if a new episode was downloaded without
        including the name of the show.

        Params:
            args: An ArgumentHandler object that is already populated
            episode: The name of the episode file being processed

        Returns: The name of the show of the episode as a string
        """

        # Args may already have determined the show from the passed-in args. If so, just return that.
        show = args.show
        if show:
            self._logger.info(self._prints.SHOW_LOADED_ARG.format(show))
            return show

        # Unique to Acquisition2: Because Izumi is no longer supported, just use the provided show name.
        data = anitopy.parse(episode)
        if 'anime_title' in data:
            self._logger.info(
                self._prints.SHOW_LOADED_ARG.format(data['anime_title']))
            return data['anime_title']

        # If the show wasn't included, this means the episode was included on its own.
        # What's nice about this? The Hisha library takes care of it for us.
        show = hisha2.hisha(episode)
        self._logger.info(self._prints.SHOW_LOADED_HISHA.format(show))

        # There should be handling for the case where Hisha doesn't find it,
        # but hisha3 will need to be developed first.

        return show
Example 5
def _clean_episode_name(unclean_name): 
    """
    Clean the episode name for new usage.
    Parameter unclean_name should only be the file name, no paths.
    """
    info = anitopy.parse(unclean_name)

    new_name = info['anime_title']

    if 'anime_season' in info:
        Ayumi.debug('Found anime_season "{}"'.format(info['anime_season']))
        new_name = new_name + " S" + str(info['anime_season'])

    if 'episode_number' in info:
        Ayumi.debug('Found episode_number "{}"'.format(info['episode_number']))
        new_name = new_name + " - " + str(info['episode_number'])

    if 'video_resolution' in info:
        Ayumi.debug('Found video_resolution "{}"'.format(info['video_resolution']))
        new_name = new_name + " [{}]".format(info['video_resolution'])

    if 'other' in info and 'uncensored' in info['other'].lower():
        Ayumi.debug('Detected this episode is uncensored, adding "(Uncensored)" to the title.')
        new_name += " (Uncensored)"

    _, ext = os.path.splitext(unclean_name)
    new_name += ext

    Ayumi.debug('Complete new file name: {}'.format(new_name))
    return new_name
Example 6
    def test_fails(self):
        failed = 0
        working_tests = []
        for index, entry in enumerate(failing_table):
            filename = entry[0]
            options = self.parse_options(entry[1])

            try:
                print('Index %d "%s"' % (index, filename))
            except UnicodeEncodeError:
                print(('Index %d "%s"' % (index, filename)).encode("utf-8"))

            elements = anitopy.parse(filename, options=options)

            expected = dict(entry[2])
            if 'id' in expected:
                del expected['id']
            try:
                self.assertEqual(expected, elements)
                working_tests.append(index)
            except AssertionError as err:
                failed += 1
                print(err)
                print(
                    '----------------------------------------------------------------------'
                )  # noqa E501

        print('\nFailed %d of %d failing test cases' %
              (failed, len(failing_table)))
        if working_tests:
            print(
                'There are {} working tests from the failing cases: {}'.format(
                    len(working_tests), working_tests))
Example 7
    async def get_episodes(self) -> List[int]:
        """
        Returns a list of episodes that are contained
        within the torrent.

        Returns
        -------
        List[int]:
            List of episodes.
        """
        files = await self._app.dl_client.get_file_structure(self.torrent_link)
        episodes = []
        for file in files:
            try:
                parsed = anitopy.parse(file)
            except Exception:
                logger.warning(f"anitopy - Could not parse '{file}', skipping")
                continue

            if "anime_type" in parsed.keys():
                continue

            try:
                episodes.append(int(parsed["episode_number"]))
            except (KeyError, ValueError, TypeError):
                pass

        return episodes
Example 8
def hisha(episode, title="userPreferred"):
    # returns the show name
    data = anitopy.parse(episode)
    names = _names(data['anime_title'])

    res = _hisha(names)
    return res.json()['data']['Media']['title'][title]
Example 9
def check_torrent(title, feed_id):
    info = anitopy.parse(title)

    check_title = False
    check_resolution = False

    if len(feeds[feed_id]['resolutions']) == 0:
        feeds[feed_id]['resolutions'] = [config['resolution']]

    try:
        if info['video_resolution'] in feeds[feed_id]['resolutions']:
            check_resolution = True
    except KeyError:
        check_resolution = False

    if len(feeds[feed_id]['anime-list']) == 0:
        check_title = True
    else:
        for anime in feeds[feed_id]['anime-list']:
            if anime['title'] == info['anime_title']:
                check_title = True
                try:
                    if anime['resolution'] == info['video_resolution']:
                        check_resolution = True
                    else:
                        check_resolution = False
                except KeyError:
                    print('No resolution was set for the anime: %s' %
                          anime['title'])

    return check_title and check_resolution
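check_torrent reads the module-level feeds and config objects; a sketch of the shapes it assumes, with field names taken from the code above and values purely hypothetical:

config = {'resolution': '1080p'}
feeds = {
    0: {
        'resolutions': ['1080p'],
        'anime-list': [
            {'title': 'Golden Kamuy', 'resolution': '1080p'},
        ],
    },
}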
Example 10
def hisha(episode, title="userPreferred"):
    """
    The default Hisha module, for standard airing cases. This will attempt to find
    the show in airing, and then not_yet_aired, and then premiered.

    It also tries multiple versions of the search:
        1. Last word removed (i.e., some kind of Season indicator)
        2. Replace the first " - " to ": " and remove the other " - "

    Forces os-level application exit if _hisha fails to find a show.

    Params:
        episode: The full string of the episode to find
        title: A string for which language the Anilist title should return

    Returns: The show of the episode provided
    """
    data = anitopy.parse(episode)
    names = _names(data['anime_title'])

    # Get the show name with the helper.
    try:
        res = _hisha(names)
    except Exception:
        # An exception is thrown if nothing was found; bail out in that case.
        print("No show was found when searching. The system will now exit.")
        raise

    # Backwards-compatibility: Hisha will return the title
    return res.json()['data']['Media']['title'][title]
Example 11
    def get_show_name(self, file_name: str) -> str:
        """
        Using the `file_name` argument, you must parse
        the file name in order to get the name of the show.

        Failure to do so will result in incorrect matching.
        """
        parsed = anitopy.parse(file_name)

        return parsed["anime_title"]
Example 12
def parse():
    # ../rsc/feeds.txt is assumed to hold one RSS feed URL per line.
    with open("../rsc/feeds.txt") as feed_file:
        feed_urls = [line.strip() for line in feed_file]

    rlist = list()
    for url in feed_urls:
        rss = feedparser.parse(url)
        for item in rss.entries:
            rlist.append(anitopy.parse(item.title))

    return rlist
Example 13
    def test_table(self):
        for index, entry in enumerate(table):
            filename = entry[0]
            options = self.parse_options(entry[1])

            elements = anitopy.parse(filename, options=options)

            expected = dict(entry[2])
            if 'id' in expected:
                del expected['id']
            self.assertEqual(expected, elements, 'on entry number %d' % index)
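Each table entry appears to be a (filename, options, expected-elements) triple; the sketch below is a hypothetical illustration of that shape, not an actual fixture:

# ('[Judas] Vinland Saga - S01E01.mkv',
#  None,
#  {'anime_title': 'Vinland Saga', 'anime_season': '01',
#   'episode_number': '01', 'file_extension': 'mkv',
#   'release_group': 'Judas'})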
Example 14
def get_title_from_name(name):
    if not is_empty(name) and isinstance(name, str):
        try:
            info = dict_2_default_dict(anitopy.parse(name))
            extracted_name = info["anime_title"]
            if extracted_name:
                return extracted_name
            ptn_info = dict_2_default_dict(PTN.parse(name))
            if ptn_info["title"]:
                return ptn_info["title"]
            return None
        except TypeError:
            return None
    return None
Example 15
def hisha2(episode):
    """
    Hisha2, unlike hisha, simply returns all the json it gets.
    """
    data = anitopy.parse(episode)
    names = _names(data['anime_title'])

    try:
        res = _hisha(names)
    except Exception:
        # An exception is thrown if nothing was found; bail out in that case.
        print("No show was found when searching. The system will now exit.")
        raise
    return res.json()
Example 16
def _rename_mass(filename):
    """
    
    Usage for mass renaming files within a folder.

    Strip out fansub names from the filename
    Params: 
    - filename: The original, dirty filename

    Does not change extension, returns new filename
    """

    p = anitopy.parse(filename)
    new_file = p['anime_title']

    # Season
    if 'anime_season' in p:
        new_file += " S" + p['anime_season']

    # Episode number, NCED
    if 'episode_number' in p:
        new_file += " - " + p['episode_number']

    # Release version
    if 'release_version' in p:
        new_file += "v" + p['release_version']

    # If uncensored, mark it
    if 'other' in p and 'uncensored' in p['other'].lower():
        new_file += " (Uncensored)"

    # Video res might not be included
    if 'video_resolution' in p:
        if "1920x1080" in p['video_resolution']:
            p['video_resolution'] = "1080p"

        p['video_resolution'] = p['video_resolution'].replace("P", "p")

        if 'p' not in p['video_resolution'].lower():
            p['video_resolution'] += "p"

        new_file += " [" + p['video_resolution'] + "]"

    new_file += "." + p['file_extension']
    return new_file
Example 17
    def __parse_name(self, name: str) -> Optional[dict]:
        anitopy_options = {
            "parse_episode_number": True,
            "parse_episode_title": False,
            "parse_file_extension": False,
            "parse_release_group": False,
        }
        parse_result = anitopy.parse(name, options=anitopy_options)
        # anitopy omits keys it could not parse, so use .get() instead of
        # direct indexing to avoid a KeyError on unparsable names.
        title = parse_result.get("anime_title")
        ep_num = parse_result.get("episode_number")
        if title is not None and ep_num is not None:
            return {
                "title": pathlib.Path(title).name,
                "ep_num": ep_num,
            }
        return None
Example 18
def get_files_to_mux(input_files_from_options_file):
    # Files may not be in the same directory.
    files_to_mux = {}

    for input_file_path in input_files_from_options_file:
        # The extension of the input file.
        input_file_extension = pathlib.Path(input_file_path).suffix
        # Fallback index, used when anitopy can't get an episode number from a file.
        files_added = 1
        for entry in os.scandir(r'{0}'.format(
                os.path.dirname(input_file_path))):
            if not entry.is_dir():
                # Entries should have the same extension as the original
                # input_file_path in input_files_from_options_file.
                if pathlib.Path(entry.path).suffix == input_file_extension:
                    parsed_file = anitopy.parse(entry.name)

                    # Find out where to insert the file.
                    insertion_index = files_added
                    if 'episode_number' in parsed_file:
                        insertion_index = parsed_file['episode_number']
                        # TODO: test anitopy names that return a list for the episode number.
                        # Ask the user to manually input the episode number? Input could be per
                        # episode, or a rule selecting the nth element of the episode-number list.
                        if isinstance(insertion_index, list):
                            print(
                                "Anitopy detected this file's episode number as a list. This may affect the accuracy of the mux."
                            )
                            insertion_index = insertion_index[0]

                    if insertion_index in files_to_mux:
                        files_to_mux[insertion_index].append(entry.path)
                    else:
                        files_to_mux[insertion_index] = [entry.path]

                    files_added += 1
    # Confirm that each mux_file_group is the same length; if not, one input directory has more files than the other.
    confirm_files_are_mapped_properly = {}
    for mux_file_group in files_to_mux.values():
        confirm_files_are_mapped_properly[len(mux_file_group)] = 0
    if len(confirm_files_are_mapped_properly) > 1:
        print("Files could not be mapped properly!")
        print("One of your directories has more files than the other")
        return None

    return files_to_mux
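A sketch of the returned mapping, with hypothetical paths. Note that the keys are whatever anitopy reports, so string episode numbers like '01' and the integer fallback index can coexist in one dict:

# get_files_to_mux(['/videos/ep01.mkv', '/subs/ep01.mkv'])  # hypothetical call
# might return:
# {'01': ['/videos/ep01.mkv', '/subs/ep01.mkv'],
#  '02': ['/videos/ep02.mkv', '/subs/ep02.mkv']}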
Example 19
    def ignore_logic(self, item: dict) -> Optional[bool]:
        """
        Optional

        This is the first function a parser runs.

        If this returns False, the item will instantly
        be ignored by Tsundoku. Any other return value
        will continue operation as normal.
        """
        parsed = anitopy.parse(item["title"])

        title = parsed.get("anime_title")
        episode = parsed.get("episode_number")
        resolution = parsed.get("video_resolution")
        if title is None or episode is None or resolution is None:
            return False
        elif resolution != "1080p":
            return False
Example 20
def _generate_new_filename(dl_file):
    info = anitopy.parse(dl_file)
    new_dl_file = info['anime_title']
    if 'anime_season' in info:
        Ayumi.debug('Found anime_season "{}"'.format(info['anime_season']))
        new_dl_file = new_dl_file + " S" + str(info['anime_season'])
    if 'episode_number' in info:
        Ayumi.debug('Found episode_number "{}"'.format(info['episode_number']))
        new_dl_file = new_dl_file + " - " + str(info['episode_number'])
    if 'video_resolution' in info:
        Ayumi.debug('Found video_resolution "{}"'.format(
            info['video_resolution']))
        new_dl_file = new_dl_file + " [{}]".format(info['video_resolution'])
    if 'other' in info and 'uncensored' in info['other'].lower():
        Ayumi.debug(
            'Detected this episode is uncensored, adding "(Uncensored)" to the title.')
        new_dl_file += " (Uncensored)"
    _, ext = os.path.splitext(dl_file)
    new_dl_file += ext
    Ayumi.debug('returning new_dl_file: {}'.format(new_dl_file))
    return new_dl_file
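A worked sketch of the rename, assuming anitopy parses the usual elements out of a SubsPlease-style name:

# _generate_new_filename('[SubsPlease] Odd Taxi - 04 (1080p).mkv')
# -> 'Odd Taxi - 04 [1080p].mkv'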
Example 21
    def resolve_file(self, root: Path, episode: int) -> Optional[Path]:
        """
        Searches a directory tree for a specific episode
        file.

        If the passed path is a file, return it.

        Parameters
        ----------
        root: Path
            The directory.
        episode: int
            The episode to search for.

        Returns
        -------
        Optional[Path]
            The found Path. It is a file.
        """
        if root.is_file():
            return root

        root = root.resolve()
        for subpath in root.rglob("*"):
            try:
                parsed = anitopy.parse(subpath.name)
            except Exception:
                logger.debug(
                    f"Anitopy - Could not parse `{subpath.name}`, skipping")
                continue

            try:
                found = int(parsed["episode_number"])
            except (KeyError, ValueError, TypeError):
                continue
            if found == episode:
                return subpath

        return None
Example 22
def parse_episode_from_name(name):
    episode, season = None, None
    if name and not is_empty(name) and isinstance(name, str):
        try:
            ptn_info = dict_2_default_dict(PTN.parse(name))
            episode = ptn_info["episode"]
            season = ptn_info["season"]
        except Exception:
            pass
        if not episode or not season:
            try:
                info = dict_2_default_dict(anitopy.parse(name))
                episode = info["episode_number"] if not episode else episode
                season = info["anime_season"] if not season else season
            except Exception:
                print("anitopy error")
        try:
            return {"episode": parse_episode(episode) if episode else None,
                    "season": get_number_from_string(season) if season else None}
        except (ValueError, AttributeError):
            return {"episode": None, 'season': None}
    return {"episode": None, 'season': None}
Example 23
    def _generate_new_episode(self, episode):
        """
        Takes the old filename and generates a new, clean name for it.

        Params:
            episode - the name of the old episode file

        Return: A "cleaned" filename
        """

        # Parse the filename
        a = anitopy.parse(episode)

        # Generate the new episode name
        new_episode = a['anime_title']

        if 'anime_season' in a:
            new_episode = new_episode + " S" + str(a['anime_season'])

        if 'episode_number' in a:
            new_episode = new_episode + " - " + a['episode_number']

        # Mark video resolution
        if 'video_resolution' in a:
            new_episode = new_episode + " [" + a['video_resolution'] + "]"

        # Mark if uncensored
        if 'other' in a and 'uncensored' in a['other'].lower():
            new_episode += " (Uncensored)"

        # Add the extension
        _, ext = os.path.splitext(episode)
        new_episode += ext

        self._logger.warning(self._prints.EPISODE_NEW_NAME.format(new_episode))
        return new_episode
Example 24
def database_builder():
    list_database = []
    list_origin_database = []
    list_torrents_dir = [
        f for f in os.listdir('torrents')
        if os.path.isfile(os.path.join('torrents', f))
    ]
    origin_database_loaded = False
    database_loaded = False
    full_loaded = False
    if os.path.isfile('anime_database.json'):
        anime_database_file = open('anime_database.json', encoding='utf-8')
        anime_database_obj = json.load(anime_database_file)
    else:
        color.color_print(Fore.CYAN, '[INFO]',
                          'MISSING OFFLINE DATABASE. DOWNLOADING...')
        color.color_print(
            Fore.CYAN, '[INFO]',
            'THANKS TO THE MANAMI PROJECT FOR PROVIDING AN OFFLINE DATABASE')
        urllib.request.urlretrieve(
            'https://raw.githubusercontent.com/manami-project/anime-offline-database/master/anime-offline-database.json',
            'anime_database.json')
        color.color_print(Fore.CYAN, '[INFO]', 'DOWNLOAD COMPLETED\n')
        # Load the freshly downloaded database so anime_database_obj is
        # always defined for the lookups below.
        anime_database_file = open('anime_database.json', encoding='utf-8')
        anime_database_obj = json.load(anime_database_file)
    if os.path.isfile('output\\database_original.json'):
        origin_database_json_file = open('output\\database_original.json',
                                         encoding='utf-8')
        try:
            origin_database_obj = json.load(origin_database_json_file)
            origin_database_loaded = True
        except ValueError:
            origin_database_loaded = False
        database_json_file = open('output\\database.json', encoding='utf-8')
        try:
            database_obj = json.load(database_json_file)
            database_loaded = True
        except ValueError:
            database_loaded = False
    if origin_database_loaded and database_loaded:
        full_loaded = True
        list_database = database_obj
        list_origin_database = origin_database_obj

    added_new_state = False
    color.color_print(Fore.LIGHTMAGENTA_EX, '[COPYRIGHT]',
                      'MANAMI PROJECT: ANIME OFFLINE DATABASE')
    color.color_print(Fore.LIGHTMAGENTA_EX, '[COPYRIGHT]',
                      'IGORCMOURA: ANITOPY')
    color.color_print(Fore.LIGHTMAGENTA_EX, '[COPYRIGHT]', 'JCUL: BENCODE\n')
    color.color_print(Fore.YELLOW, '[PROCESSING]', 'PARSE TORRENTS\n')
    for i in list_torrents_dir:
        torrent_filename = i
        torrent_full_path = 'torrents\\' + i
        with open(torrent_full_path, 'rb') as fh:
            torrent_data = fh.read()

        if not search_database(list_database, i) or not full_loaded:
            torrent = bencode.decode(torrent_data)
            torrent_announces = []
            torrent_files = []
            torrent_creation_date = ''
            torrent_hash = ''
            torrent_magnet = ''
            torrent_total_length = 0
            for im in torrent:
                torrent_creation_date = (
                    datetime.utcfromtimestamp(int(im[b'creation date'])) -
                    timedelta(hours=9)).strftime('%Y-%m-%d %H:%M:%S')
                torrent_temp_announce = []

                for imfw in im[b'announce-list']:
                    torrent_temp_announce.append(imfw[0].decode("utf-8"))
                    torrent_hash = str(
                        hashlib.sha1(bencoding.bencode(
                            im[b'info'])).hexdigest())
                    magnet_temp = 'magnet:?xt=urn:btih:{}'.format(torrent_hash)
                    torrent_magnet = magnet_temp
                torrent_announces = torrent_temp_announce
                if b'files' in im[b'info']:
                    for imf in im[b'info'][b'files']:
                        torrent_files.append({
                            'name':
                            imf[b'path'][0].decode("utf-8"),
                            'size':
                            format_size_units(imf[b'length'])
                        })
                        torrent_total_length += imf[b'length']
                else:
                    torrent_total_length = im[b'info'][b'length']
                    torrent_files.append({
                        'name':
                        im[b'info'][b'name'].decode("utf-8"),
                        'size':
                        format_size_units(im[b'info'][b'length'])
                    })

            torrent_size = format_size_units(torrent_total_length)

            info_id = random_string_digits(10)

            result_anitopy = anitopy.parse(torrent_filename)
            anime_db_result = search_anime(anime_database_obj,
                                           result_anitopy['anime_title'])

            json_data_for_add = {}
            json_data_for_add['id'] = info_id
            json_data_for_add['file_name'] = torrent_filename
            json_data_for_add['title'] = result_anitopy['anime_title']
            if 'episode_number' in result_anitopy:
                json_data_for_add['episode'] = result_anitopy['episode_number']
            else:
                json_data_for_add['episode'] = None

            json_data_for_add['hash'] = torrent_hash
            json_data_for_add['size'] = torrent_size
            if 'video_resolution' in result_anitopy:
                json_data_for_add['resolution'] = result_anitopy[
                    'video_resolution']
            else:
                json_data_for_add['resolution'] = None
            if 'video_term' in result_anitopy:
                json_data_for_add['video_codec'] = result_anitopy['video_term']
            else:
                json_data_for_add['video_codec'] = None
            if 'audio_term' in result_anitopy:
                json_data_for_add['audio_codec'] = result_anitopy['audio_term']
            else:
                json_data_for_add['audio_codec'] = None
            if 'release_group' in result_anitopy:
                json_data_for_add['release_group'] = result_anitopy[
                    'release_group']
            else:
                json_data_for_add['release_group'] = None
            json_data_for_add['created_date'] = torrent_creation_date
            json_data_for_add['magnet_url'] = torrent_magnet
            json_data_for_add[
                'torrent_url'] = 'https://anime.cryental.dev/download/' + info_id + '.torrent'
            json_data_for_add['extra'] = {
                'announces': torrent_announces,
                'files': torrent_files
            }

            if not anime_db_result:
                json_data_for_add['metadata'] = None
            else:
                info_type = None
                info_episodes = None
                info_picture = None
                info_thumbnail = None
                info_status = None
                if 'type' in anime_db_result:
                    info_type = anime_db_result['type']
                if 'episodes' in anime_db_result:
                    info_episodes = anime_db_result['episodes']
                if 'picture' in anime_db_result:
                    info_picture = anime_db_result['picture']
                if 'thumbnail' in anime_db_result:
                    info_thumbnail = anime_db_result['thumbnail']
                if 'status' in anime_db_result:
                    info_status = anime_db_result['status']
                json_data_for_add['metadata'] = {
                    'type': info_type,
                    'episodes': info_episodes,
                    'picture': info_picture,
                    'thumbnail': info_thumbnail,
                    'status': info_status
                }
            list_database.append(json_data_for_add)
            if not search_database(list_origin_database, i) or not full_loaded:
                json_original_data_for_add = {}
                json_original_data_for_add['id'] = info_id
                json_original_data_for_add['file_name'] = torrent_filename
                json_original_data_for_add['hash'] = torrent_hash
                with open(torrent_full_path, "rb") as f:
                    encodedZip = base64.b64encode(f.read())
                    json_original_data_for_add['raw_data'] = encodedZip.decode(
                    )
                json_original_data_for_add[
                    'created_date'] = torrent_creation_date
                list_origin_database.append(json_original_data_for_add)
            added_new_state = True
            color.color_print(Fore.YELLOW, '[PROCESSED] ', i)
        else:
            print(Fore.LIGHTRED_EX + '[SKIPPED] ' + Style.RESET_ALL + i)

    if added_new_state or not full_loaded:
        color.color_print(Fore.YELLOW, '[PROCESSING]', 'SORTING LIST')
        list_database.sort(key=sortSecond, reverse=True)
        color.color_print(Fore.YELLOW, '[PROCESSING]', 'SORTING ORIGINAL LIST')
        list_origin_database.sort(key=sortSecond, reverse=True)
        color.color_print(Fore.YELLOW, '[PROCESSING]', 'DISK ACCESSING')
        with open('output\\database.json', 'w') as outfile:
            color.color_print(Fore.YELLOW, '[PROCESSING]', 'WRITING LIST')
            json.dump(list_database, outfile)

        color.color_print(Fore.YELLOW, '[PROCESSING]', 'DISK ACCESSING')
        with open('output\\database_original.json', 'w') as outfile:
            color.color_print(Fore.YELLOW, '[PROCESSING]',
                              'WRITING LIST ORIGINAL')
            json.dump(list_origin_database, outfile)

        color.color_print(Fore.YELLOW, '[PROCESSING]', 'WRITING UPDATED DATE')
        today = datetime.now()
        new_days = open('output\\updated_on.txt', 'w')
        new_days.write(today.strftime("%Y-%m-%d %H:%M:%S"))
        new_days.close()

        color.color_print(Fore.YELLOW, '[PROCESSING]', 'WRITING HASH FILES')
        database_md5 = str(md5('output\\database.json'))
        origin_database_md5 = str(md5('output\\database_original.json'))
        updated_md5 = str(md5('output\\updated_on.txt'))
        with open('output\\database.json.md5', 'w') as outfile:
            json.dump(database_md5, outfile)
        with open('output\\database_original.json.md5', 'w') as outfile:
            json.dump(origin_database_md5, outfile)
        with open('output\\updated_on.txt.md5', 'w') as outfile:
            json.dump(updated_md5, outfile)
    color.color_print(Fore.YELLOW, '[DONE]', 'COMPLETED\n')
Example 25
    show_name = None
    episode_name = None

    new_file = src_path.replace(os.path.commonpath([settings.get('KOTEN_WATCH_PATH', DEFAULT_WATCH_PATH), src_path]) + "/", "")

    if m := util._show_manually_specified(new_file):
        Ayumi.info("Detected show name and episode name in event, using Mode 1.")
        show_name = m.group(1)
        episode_name = util._clean_episode_name(m.group(2))
        Ayumi.info("New show name: {}".format(show_name), color=Ayumi.LYELLOW)
        Ayumi.info("New episode name: {}".format(episode_name), color=Ayumi.LYELLOW)
    else:
        Ayumi.debug("Non-conformant episode provided, using Naomi to find show name.")
        episode_name = util._clean_episode_name(pathlib.PurePath(src_path).name)
        show_name = naomi.find_closest_title(anitopy.parse(new_file)['anime_title'])
        Ayumi.info("New show name: {}".format(show_name), color=Ayumi.LYELLOW)
        Ayumi.info("New episode name: {}".format(episode_name), color=Ayumi.LYELLOW)

        # There is an edge case where Anilist is down and Naomi returns None.
        # In that case, use the anitopy-parsed show as the title.
        if not show_name:
            show_name = anitopy.parse(new_file)['anime_title']

    job = {
        "show": show_name,
        "episode": episode_name,
        "filesize": os.path.getsize(src_path),
        "sub": "softsub"
    }
Example 26
def rss(last_guid=None):

    try:
        with rabbitpy.Connection(
                'amqp://{username}:{password}@{host}:{port}/{vhost}'.format(
                    username=settings.get_fresh('RABBITMQ_USERNAME'),
                    password=settings.get_fresh('RABBITMQ_PASSWORD'),
                    host=settings.get_fresh('RABBITMQ_HOST'),
                    port=settings.get_fresh('RABBITMQ_PORT'),
                    vhost=settings.get_fresh('RABBITMQ_VHOST'))) as conn:
            with conn.channel() as channel:

                Ayumi.set_rabbitpy_channel(channel)
                channel.enable_publisher_confirms()

                while True:

                    Ayumi.info("Now starting feed fetch.", color=Ayumi.LCYAN)

                    feed = feedparser.parse(
                        settings.get('ACQUISITION_RSS_FEED_URL', None))
                    accepted_shows = _load_accepted_shows()
                    Ayumi.debug(
                        "Loaded accepted shows map: {}".format(accepted_shows))
                    history = _load_history()
                    new_history = list()

                    for entry in feed.entries:

                        # Fetch data first
                        title, link, guid = entry.title, entry.link, entry.guid
                        Ayumi.debug(
                            'Encountered RSS item with title "{}", and guid "{}"'
                            .format(title, guid))

                        # If feed item with last GUID encountered, do not process any further
                        if guid == last_guid:
                            Ayumi.debug(
                                "Encountered RSS item with last_guid {} matching argument, breaking and writing history."
                                .format(last_guid),
                                color=Ayumi.YELLOW)
                            break

                        # Check the title data
                        # Use the parsed title to match user provided titles.
                        parsed_title = anitopy.parse(title)['anime_title']
                        if _strip_title(parsed_title) not in accepted_shows:
                            Ayumi.info(
                                'Feed item with title "{}" (show title: "{}") is not in accepted shows, skipping.'
                                .format(title, parsed_title))
                        else:
                            if guid in history:
                                # This item has been previously processed, skip it.
                                Ayumi.info(
                                    'Feed item with title "{}" (show title: "{}") has already been processed, skipping.'
                                    .format(title, parsed_title),
                                    color=Ayumi.GREEN)
                            else:
                                # A new feeditem! Let us process it.
                                Ayumi.info(
                                    'Feed item with title "{}" (show title: "{}") is in accepted shows, processing.'
                                    .format(title, parsed_title),
                                    color=Ayumi.YELLOW)
                                message = rabbitpy.Message(
                                    channel,
                                    json.dumps({
                                        "title":
                                        title,
                                        "link":
                                        link,
                                        "guid":
                                        guid,
                                        "show_title":
                                        accepted_shows[_strip_title(
                                            parsed_title)]
                                    }))
                                acquisition_rss_exchange_name = settings.get(
                                    'ACQUISITION_RSS_EXCHANGE')
                                while not message.publish(
                                        acquisition_rss_exchange_name,
                                        mandatory=True):
                                    Ayumi.warning(
                                        'Failed to publish feed item with title "{}" to exchange "{}", retrying in 60s...'
                                        .format(title,
                                                acquisition_rss_exchange_name),
                                        color=Ayumi.RED)
                                    sleep(60)
                                Ayumi.info(
                                    'Published feed item with title "{}" to exchange "{}".'
                                    .format(
                                        title,
                                        acquisition_rss_exchange_name,
                                    ),
                                    color=Ayumi.LGREEN)

                            # Keep all processed items in the new history; entries age out as the RSS feed expires them.
                            Ayumi.debug(
                                'Appending item "{}" with title "{}" (show title: "{}") to new_history for write.'
                                .format(guid, title, parsed_title),
                                color=Ayumi.YELLOW)
                            new_history.append(guid)

                    _write_history(new_history)

                    # Sleep till the next iteration
                    sleep_duration = settings.get(
                        'ACQUISITION_RSS_SLEEP_INTERVAL',
                        _DEFAULT_SLEEP_INTERVAL)
                    Ayumi.info(
                        "Now sleeping {} seconds.".format(sleep_duration),
                        color=Ayumi.LCYAN)
                    sleep(sleep_duration)

    except rabbitpy.exceptions.AMQPConnectionForced:
        Ayumi.rabbitpy_channel = None
        Ayumi.critical(
            "Operator manually closed RabbitMQ connection, shutting down.",
            color=Ayumi.RED)
        # Use return for now because in some cases, calling exit() may invoke the retry() header.
        return
Example 27
def bittorrent():
    try:
        with rabbitpy.Connection('amqp://{username}:{password}@{host}:{port}/{vhost}'.format(
            username=settings.get('RABBITMQ_USERNAME'),
            password=settings.get('RABBITMQ_PASSWORD'),
            host=settings.get('RABBITMQ_HOST'),
            port=settings.get('RABBITMQ_PORT'),
            vhost=settings.get('RABBITMQ_VHOST')
        )) as conn:
            with conn.channel() as channel:

                Ayumi.set_rabbitpy_channel(channel)
                channel.enable_publisher_confirms()

                queue_name = settings.get('ACQUISITION_BITTORRENT_QUEUE')
                Ayumi.debug("Connecting to queue: {}".format(queue_name))
                queue = rabbitpy.Queue(channel, queue_name)
                queue.declare(passive=True)

                Ayumi.info('Now listening for messages on queue: {}...'.format(
                    queue_name), color=Ayumi.LYELLOW)

                for message in queue.consume(prefetch=1):

                    Ayumi.info(
                        "Received new message, starting...", color=Ayumi.CYAN)

                    feeditem_preprocess = _load_amqp_message_body(message)
                    Ayumi.debug('Loaded message raw: "{}"'.format(
                        feeditem_preprocess))
                    if not feeditem_preprocess or not metsuke.validate_feeditem(feeditem_preprocess):
                        Ayumi.error('Invalid message received, rejecting. Output: "{}"'.format(
                            feeditem_preprocess), color=Ayumi.RED)
                        message.reject()
                        continue

                    # Load initial data
                    feeditem: metsuke.FeedItem = metsuke.generate_feeditem(
                        feeditem_preprocess)
                    shows_map = _load_shows_map()
                    overload_title = feeditem.show_title
                    Ayumi.info(
                        'Setting overload title: "{}"'.format(overload_title))
                    # If there is a central override, use it instead.
                    parsed_feed_title = _strip_title(
                        anitopy.parse(feeditem.title)['anime_title'])
                    if parsed_feed_title in shows_map:
                        central_overload_title = shows_map[parsed_feed_title]
                        Ayumi.info('Overwriting overload title with central overload title: "{}"'.format(
                            central_overload_title))
                        overload_title = central_overload_title

                    with tempfile.TemporaryDirectory() as temp_dir:

                        Ayumi.debug(
                            'Created temporary directory under path: "{}"'.format(temp_dir))

                        # Download the episode
                        try:
                            res = subprocess.run(
                                [
                                    "aria2c",
                                    "--seed-time=0",
                                    "--rpc-save-upload-metadata=false",
                                    "--bt-save-metadata=false",
                                    "--dir={}".format(temp_dir),
                                    feeditem.link
                                ]
                            )
                            if res.returncode != 0:
                                Ayumi.warning(
                                    "Aria2 did not return a 0 exit code, assuming download errored and nacking.", color=Ayumi.RED)
                                message.nack()
                                continue
                        except subprocess.TimeoutExpired:
                            Ayumi.warning(
                                "Download via aria2 timed out - nacking.", color=Ayumi.RED)
                            message.nack()
                            continue

                        # Rename it
                        potential_files = [f for f in os.listdir(
                            temp_dir) if f.endswith(".mkv")]
                        Ayumi.debug(
                            "Loaded potential files: {}".format(potential_files))
                        if len(potential_files) != 1:
                            Ayumi.warning(
                                "Did not find exactly one .mkv file, rejecting this job.", color=Ayumi.RED)
                            message.reject()
                            continue
                        dl_file = potential_files[0]
                        Ayumi.info('Found file: "{}"'.format(dl_file))
                        dl_file_path = os.path.abspath(
                            '{}/{}'.format(_clean_title(temp_dir), potential_files[0]))
                        Ayumi.debug(
                            'dl_file_path: "{}"'.format(dl_file_path))

                        # Remove unneeded files
                        # TODO: THIS IS A HOTFIX, CHANGE LOGIC IN B2
                        bad_files = [f for f in os.listdir(
                            temp_dir) if not f.endswith(".mkv")]
                        Ayumi.debug("Found bad files: {}".format(bad_files))
                        for bf in bad_files:
                            try:
                                Ayumi.debug("Removing bad file: {}".format(bf))
                                os.remove(
                                    '{}/{}'.format(_clean_title(temp_dir), bf))
                            except OSError:
                                Ayumi.debug("Removing bad tree: {}".format(bf))
                                shutil.rmtree(
                                    '{}/{}'.format(_clean_title(temp_dir), bf))

                        # Move the file to proper layout with updated name
                        dl_file_new_name = _generate_new_filename(dl_file)
                        Ayumi.info('Generated new episode name: "{}"'.format(
                            dl_file_new_name))
                        dl_file_new_dir = "{}/{}".format(
                            temp_dir, overload_title)
                        Ayumi.debug(
                            'dl_file_new_dir: "{}"'.format(dl_file_new_dir))
                        dl_file_new_path = "{}/{}".format(
                            dl_file_new_dir, dl_file_new_name)
                        Ayumi.debug(
                            'dl_file_new_path: "{}"'.format(
                                dl_file_new_path))
                        Ayumi.debug('Moving "{}" to "{}"'.format(
                            dl_file_path, dl_file_new_path))
                        os.mkdir(dl_file_new_dir)
                        shutil.move(dl_file_path, dl_file_new_path)

                        # Upload the file to rclone destination
                        with tempfile.NamedTemporaryFile(suffix=".conf", mode="w+b") as rconf:
                            rconf.write(str.encode(
                                settings.get("RCLONE_CONFIG_FILE")))
                            rconf.flush()
                            Ayumi.debug(
                                'Created temporary rclone file under path: "{}"'.format(rconf.name))
                            rclone_dest = _clean_title(settings.get(
                                "ACQUISITION_BITTORRENT_RCLONE_DEST"))
                            rclone_flags = settings.get("RCLONE_FLAGS", "")
                            command = [
                                "rclone", "--config={}".format(rconf.name), "copy", temp_dir, rclone_dest]
                            command.extend(rclone_flags.split())
                            Ayumi.debug(
                                'Rclone command to be run: "{}"'.format(command))

                            try:
                                Ayumi.info(
                                    'Now uploading new blob to: "{}"'.format(rclone_dest))
                                rclone_res = subprocess.run(
                                    command, timeout=3600)
                                if rclone_res.returncode != 0:
                                    Ayumi.warning('Rclone returned non-zero code of {}, nacking.'.format(
                                        rclone_res.returncode), color=Ayumi.LRED)
                                    message.nack()
                                    continue
                            except subprocess.TimeoutExpired:
                                Ayumi.warning(
                                    'Rclone upload timed out, nacking.', color=Ayumi.LRED)
                                message.nack()
                                continue

                        # Fetch information on the file to create a job
                        new_message = rabbitpy.Message(channel, dumps(
                            {
                                "show": overload_title,
                                "episode": dl_file_new_name,
                                "filesize": int(os.path.getsize(dl_file_new_path)),
                                "sub": "SOFTSUB"
                            }
                        ))
                        acquisition_bittorrent_exchange_name = settings.get(
                            'ACQUISITION_BITTORRENT_EXCHANGE')
                        Ayumi.info('Sending to exchange: "{}"'.format(
                            acquisition_bittorrent_exchange_name), color=Ayumi.CYAN)
                        while not new_message.publish(acquisition_bittorrent_exchange_name, mandatory=True):
                            Ayumi.warning(
                                "Failed to publish feed item, trying again in 60 seconds")
                            sleep(60)
                        Ayumi.info("Published feed item with title: " +
                                   overload_title, color=Ayumi.LGREEN)

                    message.ack()

    except rabbitpy.exceptions.AMQPConnectionForced:
        Ayumi.warning(
            "Operator manually closed RabbitMQ connection, shutting down.", color=Ayumi.LYELLOW)
        return
Example 28
    def search(self, query, ep, is_movie=False):
        url = f'{self.url}{query}{self.opts}'
        res = requests.get(url)
        if res.status_code != requests.codes.ok:
            return False
        soup = BeautifulSoup(res.content, features='lxml')
        torrents = soup.find_all('tr', {'class': 'success'})

        search_results = []
        for torrent in torrents:
            # select() takes a CSS selector string; cells are picked positionally below.
            info = torrent.select('td')
            img = info[0].select('img')
            img_desc = img[0].get('alt', 'null')

            if 'Anime - English-translated' not in img_desc:
                print(img_desc)
                continue

            title_info = info[1].select('a')
            if len(title_info) > 1:
                title = title_info[1].get('title', 'null')
            else:
                title = title_info[0].get('title', 'null')
            # title can then be run through the parser from nyaasi to extract relevant info
            magnet = ''
            torrent_links = info[2].select('a')
            for torrent_link in torrent_links:
                link = torrent_link.get('href', 'null')
                if link.startswith('magnet'):
                    magnet = link
            file_size = info[3].text
            date_upload = info[4].text
            seeders = info[5].text

            # don't bother if 0 seeders
            if int(seeders) < 1:
                continue

            # returns
            #   'anime_season'
            #   'anime_title':
            #   'audio_term':
            #   'episode_number
            #   'file_name':
            #   'release_group':
            #   'source':
            #   'video_resolution':
            #   'video_term':
            title_info = anitopy.parse(title)
            # if no episode_number, assume it's batch

            if not is_movie:
                episode_number = title_info.get('episode_number', None)
                if not episode_number:
                    episode_number = 'Batch'

                if not isinstance(episode_number, list):
                    if not episode_number.isdigit():
                        episode_number = 'Batch'
                    elif episode_number != ep:
                        # wrong episode, check next torrent
                        continue

                info_obj = {
                    'title': title,
                    'magnet': magnet,
                    'size': file_size,
                    'uploaded on': date_upload,
                    'seeders': seeders,
                    'resolution': title_info.get('video_resolution',
                                                 'undefined'),
                    'episode_number': episode_number,
                    'season': title_info.get('anime_season', '1')
                }
            else:
                info_obj = {
                    'title': title,
                    'magnet': magnet,
                    'seeders': seeders,
                    'resolution': title_info.get('video_resolution',
                                                 'undefined')
                }
            search_results.append(info_obj)
        return search_results
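A sketch of a call and the shape of each non-movie result; the instantiation and all values are hypothetical, and note that the episode check compares strings, so ep should match anitopy's formatting (e.g. '05'):

# results = scraper.search('mushoku tensei', '05')
# [{'title': '...', 'magnet': 'magnet:?xt=urn:btih:...', 'size': '1.4 GiB',
#   'uploaded on': '2021-05-02 13:37', 'seeders': '120',
#   'resolution': '1080p', 'episode_number': '05', 'season': '1'}, ...]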