Example 1
    def logSuccess(release):
        release = FailedHistory.prepareFailedName(release)
        for dbData in [
                x['doc'] for x in FailedDB().db.all('history', with_doc=True)
                if x['doc']['release'] == release
        ]:
            FailedDB().db.delete(dbData)
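A side note on the pattern used here and repeated in the later examples: the matching documents are materialized into a list before the delete loop runs, so the index scan is never mutated mid-iteration. A self-contained sketch of the same filter-then-delete idea, with a plain list standing in for the CodernityDB 'history' index (all names here are illustrative, not part of SiCKRAGE):

history = [{'release': 'a'}, {'release': 'b'}, {'release': 'a'}]
release = 'a'
# materialize the matches first, then delete, mirroring the comprehension above
for doc in [d for d in history if d['release'] == release]:
    history.remove(doc)   # analogous to FailedDB().db.delete(doc)
print(history)            # [{'release': 'b'}]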
Example 2
    def trimHistory():
        """Trims the history table to the last 30 days of snatches"""
        date = (datetime.today() - timedelta(days=30)).strftime(History.date_format)
        for dbData in [
                x['doc'] for x in FailedDB().db.all('history', with_doc=True)
                if x['doc']['date'] < date
        ]:
            FailedDB().db.delete(dbData)
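Note that the < comparison here is between strings, so the trim is only correct if History.date_format produces strings that sort lexicographically in date order (zero-padded, most-significant field first). A minimal stdlib sketch of that assumption, using a %Y%m%d%H%M%S-style format as a stand-in:

from datetime import datetime, timedelta

date_format = '%Y%m%d%H%M%S'  # assumed stand-in for History.date_format
cutoff = (datetime.today() - timedelta(days=30)).strftime(date_format)
# zero-padded, year-first strings compare in date order
print('20200101000000' < cutoff)  # True for any cutoff after 2020-01-01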
Example 3
    def findFailedRelease(epObj):
        """
        Find releases in history by show ID and season.
        Return None for release if multiple found or no release found.
        """

        release = None
        provider = None

        # Keep only the oldest snatch for this episode; delete newer duplicates
        dbData = sorted([
            x['doc'] for x in FailedDB().db.get_many(
                'history', epObj.show.indexerid, with_doc=True)
            if x['doc']['season'] == epObj.season
            and x['doc']['episode'] == epObj.episode
        ], key=lambda d: d['date'])

        for doc in dbData[1:]:
            FailedDB().db.delete(doc)

        # Search for release in snatch history
        for dbData in [
                x['doc'] for x in FailedDB().db.get_many(
                    'history', epObj.show.indexerid, with_doc=True)
                if x['doc']['season'] == epObj.season
                and x['doc']['episode'] == epObj.episode
        ]:

            release = str(dbData["release"])
            provider = str(dbData["provider"])
            date = dbData["date"]

            # Clear any incomplete snatch records for this release if any exist
            for doc in [
                    x['doc']
                    for x in FailedDB().db.all('history', with_doc=True)
            ]:
                if doc['release'] == release and doc['date'] != date:
                    FailedDB().db.delete(doc)

            # Found a previously failed release
            sickrage.srCore.srLogger.debug(
                "Failed release found for season (%s): (%s)" %
                (epObj.season, dbData["release"]))

            return release, provider

        # Release was not found
        sickrage.srCore.srLogger.debug(
            "No releases found for season (%s) of (%s)" %
            (epObj.season, epObj.show.indexerid))

        return release, provider
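A hedged usage sketch: the import path below is an assumption about where FailedHistory lives, and epObj stands for a SiCKRAGE episode object as in the excerpt; callers unpack the pair and treat None as "no failed snatch on record".

# module path assumed for illustration; adjust to where FailedHistory lives
from sickrage.core.tv.show.history import FailedHistory

def retry_if_failed(epObj):
    # epObj is assumed to be a SiCKRAGE episode object
    release, provider = FailedHistory.findFailedRelease(epObj)
    if release is not None:
        print("re-searching after failed snatch from %s: %s" % (provider, release))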
Example 4
    def logFailed(release):
        log_str = ""
        size = -1
        provider = ""

        release = FailedHistory.prepareFailedName(release)

        dbData = [
            x['doc']
            for x in FailedDB().db.get_many('history', release, with_doc=True)
        ]

        if len(dbData) == 0:
            sickrage.srCore.srLogger.warning(
                "Release not found in snatch history.")
        elif len(dbData) > 1:
            sickrage.srCore.srLogger.warning(
                "Multiple logged snatches found for release")
            sizes = len(set(x["size"] for x in dbData))
            providers = len(set(x["provider"] for x in dbData))
            if sizes == 1:
                sickrage.srCore.srLogger.warning(
                    "However, they're all the same size. Continuing with found size."
                )
                size = dbData[0]["size"]
            else:
                sickrage.srCore.srLogger.warning(
                    "They also vary in size. Deleting the logged snatches and recording this release with no size/provider"
                )
                for result in dbData:
                    FailedHistory.deleteLoggedSnatch(result["release"],
                                                     result["size"],
                                                     result["provider"])

            if providers == 1:
                sickrage.srCore.srLogger.info(
                    "They're also from the same provider. Using it as well.")
                provider = dbData[0]["provider"]
        else:
            size = dbData[0]["size"]
            provider = dbData[0]["provider"]

        if not FailedHistory.hasFailed(release, size, provider):
            FailedDB().db.insert({
                '_t': 'failed',
                'release': release,
                'size': size,
                'provider': provider
            })

        FailedHistory.deleteLoggedSnatch(release, size, provider)

        return log_str
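The multi-snatch branch above decides whether the duplicate records "agree" on size and provider by collapsing each field into a set. A self-contained illustration of that check (the data is made up):

snatches = [{'size': 100, 'provider': 'A'}, {'size': 100, 'provider': 'B'}]
same_size = len(set(s['size'] for s in snatches)) == 1        # True: reuse the size
same_provider = len(set(s['provider'] for s in snatches)) == 1  # False: record without one
print(same_size, same_provider)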
Example 5
    def deleteLoggedSnatch(release, size, provider):
        """
        Remove a snatch from history

        :param release: release to delete
        :param size: Size of release
        :param provider: Provider to delete it from
        """
        release = FailedHistory.prepareFailedName(release)
        for dbData in [
                x['doc'] for x in FailedDB().db.all('history', with_doc=True)
                if x['doc']['release'] == release and x['doc']['size'] == size
                and x['doc']['provider'] == provider
        ]:
            FailedDB().db.delete(dbData)
Example 6
    def logSnatch(searchResult):
        """
        Logs a successful snatch

        :param searchResult: Search result that was successful
        """
        logDate = datetime.today().strftime(History.date_format)
        release = FailedHistory.prepareFailedName(searchResult.name)

        providerClass = searchResult.provider
        if providerClass is not None:
            provider = providerClass.name
        else:
            provider = "unknown"

        show_obj = searchResult.episodes[0].show

        for episode in searchResult.episodes:
            FailedDB().db.insert({
                '_t': 'history',
                'date': logDate,
                'size': searchResult.size,
                'release': release,
                'provider': provider,
                'showid': show_obj.indexerid,
                'season': episode.season,
                'episode': episode.episode,
                'old_status': episode.status
            })
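For reference, each inserted history document ends up shaped roughly like this (a hedged sketch: every value is illustrative, and 'date' assumes a %Y%m%d%H%M%S-style History.date_format):

doc = {
    '_t': 'history',                     # CodernityDB-style document type tag
    'date': '20170101120000',            # snatch timestamp string
    'size': 1073741824,                  # release size (value illustrative)
    'release': 'some.show.s01e01.720p',  # name as returned by prepareFailedName
    'provider': 'unknown',
    'showid': 12345,                     # indexer id of the show
    'season': 1,
    'episode': 1,
    'old_status': 3,                     # episode status before the snatch (numeric code)
}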
Example 7
    def revertFailedEpisode(epObj):
        """Restore the episodes of a failed download to their original state"""
        dbData = [
            x['doc'] for x in FailedDB().db.all('history', with_doc=True)
            if x['doc']['showid'] == epObj.show.indexerid
            and x['doc']['season'] == epObj.season
        ]

        history_eps = {res["episode"]: res for res in dbData}

        try:
            sickrage.srCore.srLogger.info(
                "Reverting episode (%s, %s): %s" %
                (epObj.season, epObj.episode, epObj.name))
            with epObj.lock:
                if epObj.episode in history_eps:
                    sickrage.srCore.srLogger.info("Found in history")
                    epObj.status = history_eps[epObj.episode]['old_status']
                else:
                    sickrage.srCore.srLogger.warning(
                        "WARNING: Episode not found in history. Setting it back to WANTED"
                    )
                    epObj.status = WANTED

                # persist the reverted status either way
                epObj.saveToDB()

        except EpisodeNotFoundException as e:
            sickrage.srCore.srLogger.warning(
                "Unable to create episode, please set its status manually: {}".
                format(e.message))
Example 8
    def setUp(self):
        super(SiCKRAGETestDBCase, self).setUp()
        sickrage.app.main_db = MainDB()
        sickrage.app.cache_db = CacheDB()
        sickrage.app.failed_db = FailedDB()
        for db in [
                sickrage.app.main_db, sickrage.app.cache_db,
                sickrage.app.failed_db
        ]:
            db.initialize()
Example 9
def setUp_test_db(force=False):
    """upgrades the db to the latest version
    """

    # remove old db files
    tearDown_test_db()

    # upgrade main
    MainDB().initialize()

    # upgrade cache
    CacheDB().initialize()

    # upgrade failed
    FailedDB().initialize()
Example 10
    def hasFailed(release, size, provider="%"):
        """
        Returns True if a release has previously failed.

        If provider is given, return True only if the release is found
        with that specific provider. Otherwise, return True if the release
        is found with any provider.

        :param release: Release name to record failure
        :param size: Size of release
        :param provider: Specific provider to search (defaults to all providers)
        :return: True if a release has previously failed.
        """

        release = FailedHistory.prepareFailedName(release)
        dbData = [
            x['doc']
            for x in FailedDB().db.get_many('failed', release, with_doc=True)
            if x['doc']['size'] == size and x['doc']['provider'] == provider
        ]

        return len(dbData) > 0
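One caveat worth flagging: the provider="%" default reads like a SQL LIKE wildcard left over from an earlier schema, but the filter above is a plain == comparison, so the default only matches a provider literally named "%". A self-contained sketch of the mismatch (names are illustrative):

def has_failed(rows, size, provider='%'):
    # mirrors the equality filter above; '%' is NOT treated as a wildcard
    return any(r['size'] == size and r['provider'] == provider for r in rows)

rows = [{'size': 100, 'provider': 'A'}]
print(has_failed(rows, 100))       # False, despite the "all providers" intent
print(has_failed(rows, 100, 'A'))  # True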
Example 11
def setUp_test_db(force=False):
    """upgrades the db to the latest version
    """

    global TESTDB_INITALIZED

    if not TESTDB_INITALIZED or force:
        # remove old db files
        tearDown_test_db()

        # upgrade main
        MainDB().initialize()

        # upgrade cache
        CacheDB().initialize()

        # upgrade failed
        FailedDB().initialize()

        # populate scene exceptions table
        # retrieve_exceptions(False, False)

        TESTDB_INITALIZED = True
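The module-level flag is what distinguishes this version from Example 9: setup becomes idempotent unless force=True. The same guard pattern in miniature (a minimal, self-contained sketch):

_INITIALIZED = False

def setup(force=False):
    global _INITIALIZED
    if not _INITIALIZED or force:
        print('expensive one-time setup')  # tearDown + initialize in the real code
        _INITIALIZED = True

setup()       # runs
setup()       # skipped
setup(True)   # runs again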
Example 12
    def start(self):
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # patch modules with encoding kludge
        patch_modules()

        # init core classes
        self.notifier_providers = NotifierProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.log = Logger()
        self.config = Config()
        self.api = API()
        self.alerts = Notifications()
        self.main_db = MainDB()
        self.cache_db = CacheDB()
        self.failed_db = FailedDB()
        self.scheduler = TornadoScheduler()
        self.wserver = WebServer()
        self.google_auth = GoogleAuth()
        self.name_cache = NameCache()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.daily_searcher = DailySearcher()
        self.failed_snatch_searcher = FailedSnatchSearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()

        # Check if we need to perform a restore first
        if os.path.exists(
                os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            success = restoreSR(
                os.path.abspath(os.path.join(self.data_dir, 'restore')),
                self.data_dir)
            print("Restoring SiCKRAGE backup: %s!\n" %
                  ("FAILED", "SUCCESSFUL")[success])
            if success:
                shutil.rmtree(os.path.abspath(
                    os.path.join(self.data_dir, 'restore')),
                              ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(
                os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.moveFile(
                    os.path.join(self.data_dir, 'sickrage.db'),
                    os.path.join(
                        self.data_dir, '{}.bak-{}'.format(
                            'sickrage.db',
                            datetime.datetime.now().strftime(
                                '%Y%m%d_%H%M%S'))))

            helpers.moveFile(
                os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # load config
        self.config.load()

        # set language
        self.config.change_gui_lang(self.config.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.socket_timeout)

        # setup logger settings
        self.log.logSize = self.config.log_size
        self.log.logNr = self.config.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.config.debug
        self.log.consoleLogging = not self.quite

        # start logger
        self.log.start()

        # user agent
        if self.config.random_user_agent:
            self.user_agent = UserAgent().random

        urlparse.uses_netloc.append('scgi')
        urllib.FancyURLopener.version = self.user_agent

        # Check available space
        try:
            total_space, available_space = getFreeSpace(self.data_dir)
            if available_space < 100:
                self.log.error(
                    'Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data '
                    'otherwise. Only %sMB left', available_space)
                return
        except Exception:
            self.log.error('Failed getting diskspace: %s',
                           traceback.format_exc())

        # perform database startup actions
        for db in [self.main_db, self.cache_db, self.failed_db]:
            # initialize database
            db.initialize()

            # check integrity of database
            db.check_integrity()

            # migrate database
            db.migrate()

            # misc database cleanups
            db.cleanup()

        # compact main database
        if (not sickrage.app.developer
                and self.config.last_db_compact < time.time() - 604800):  # 7 days
            self.main_db.compact()
            self.config.last_db_compact = int(time.time())

        # load name cache
        self.name_cache.load()

        # load data for shows from database
        self.load_shows()

        if self.config.default_page not in ('home', 'schedule', 'history',
                                            'news', 'IRC'):
            self.config.default_page = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(sickrage.app.cache_dir, folder),
                              ignore_errors=True)
            except Exception:
                continue

        # init anidb connection
        if self.config.use_anidb:

            def anidb_logger(msg):
                return self.log.debug("AniDB: {} ".format(msg))

            try:
                self.adba_connection = adba.Connection(keepAlive=True,
                                                       log=anidb_logger)
                self.adba_connection.auth(self.config.anidb_username,
                                          self.config.anidb_password)
            except Exception as e:
                self.log.warning("AniDB exception msg: %r " % repr(e))

        if self.config.web_port < 21 or self.config.web_port > 65535:
            self.config.web_port = 8081

        if not self.config.web_cookie_secret:
            self.config.web_cookie_secret = generateCookieSecret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.anon_redirect.endswith('?'):
            self.config.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
            self.config.root_dirs = ''

        self.config.naming_force_folders = check_force_season_folders()

        if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.config.nzb_method = 'blackhole'

        if self.config.torrent_method not in ('blackhole', 'utorrent',
                                              'transmission', 'deluge',
                                              'deluged', 'download_station',
                                              'rtorrent', 'qbittorrent',
                                              'mlnet', 'putio'):
            self.config.torrent_method = 'blackhole'

        if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
            self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq
        if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
            self.config.daily_searcher_freq = self.config.min_daily_searcher_freq
        self.config.min_backlog_searcher_freq = self.backlog_searcher.get_backlog_cycle_time()
        if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
            self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq
        if self.config.version_updater_freq < self.config.min_version_updater_freq:
            self.config.version_updater_freq = self.config.min_version_updater_freq
        if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
            self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq
        if self.config.failed_snatch_age < self.config.min_failed_snatch_age:
            self.config.failed_snatch_age = self.config.min_failed_snatch_age
        if self.config.proper_searcher_interval not in ('15m', '45m', '90m',
                                                        '4h', 'daily'):
            self.config.proper_searcher_interval = 'daily'
        if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
            self.config.showupdate_hour = 0
        if self.config.subtitles_languages[0] == '':
            self.config.subtitles_languages = []

        # add version checker job
        self.scheduler.add_job(
            self.version_updater.run,
            IntervalTrigger(hours=self.config.version_updater_freq),
            name=self.version_updater.name,
            id=self.version_updater.name)

        # add network timezones updater job
        self.scheduler.add_job(update_network_dict,
                               IntervalTrigger(days=1),
                               name="TZUPDATER",
                               id="TZUPDATER")

        # add show updater job
        self.scheduler.add_job(self.show_updater.run,
                               IntervalTrigger(
                                   days=1,
                                   start_date=datetime.datetime.now().replace(
                                       hour=self.config.showupdate_hour)),
                               name=self.show_updater.name,
                               id=self.show_updater.name)

        # add daily search job
        self.scheduler.add_job(
            self.daily_searcher.run,
            IntervalTrigger(minutes=self.config.daily_searcher_freq,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=4)),
            name=self.daily_searcher.name,
            id=self.daily_searcher.name)

        # add failed snatch search job
        self.scheduler.add_job(
            self.failed_snatch_searcher.run,
            IntervalTrigger(hours=1,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=4)),
            name=self.failed_snatch_searcher.name,
            id=self.failed_snatch_searcher.name)

        # add backlog search job
        self.scheduler.add_job(
            self.backlog_searcher.run,
            IntervalTrigger(minutes=self.config.backlog_searcher_freq,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=30)),
            name=self.backlog_searcher.name,
            id=self.backlog_searcher.name)

        # add auto-postprocessing job
        self.scheduler.add_job(
            self.auto_postprocessor.run,
            IntervalTrigger(minutes=self.config.autopostprocessor_freq),
            name=self.auto_postprocessor.name,
            id=self.auto_postprocessor.name)

        # add find proper job
        self.scheduler.add_job(
            self.proper_searcher.run,
            IntervalTrigger(minutes={
                '15m': 15,
                '45m': 45,
                '90m': 90,
                '4h': 4 * 60,
                'daily': 24 * 60
            }[self.config.proper_searcher_interval]),
            name=self.proper_searcher.name,
            id=self.proper_searcher.name)

        # add trakt.tv checker job
        self.scheduler.add_job(self.trakt_searcher.run,
                               IntervalTrigger(hours=1),
                               name=self.trakt_searcher.name,
                               id=self.trakt_searcher.name)

        # add subtitles finder job
        self.scheduler.add_job(
            self.subtitle_searcher.run,
            IntervalTrigger(hours=self.config.subtitle_searcher_freq),
            name=self.subtitle_searcher.name,
            id=self.subtitle_searcher.name)

        # start scheduler service
        self.scheduler.start()

        # start queues
        self.search_queue.start()
        self.show_queue.start()
        self.postprocessor_queue.start()

        # start webserver
        self.wserver.start()

        # start ioloop
        self.io_loop.start()
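The scheduling pattern above repeats for every background service: one add_job call with an IntervalTrigger and a stable name/id pair, so the job can be looked up or rescheduled later. A minimal runnable sketch with APScheduler (the library TornadoScheduler and IntervalTrigger come from); the job and ids are made up:

import time

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.interval import IntervalTrigger

def heartbeat():
    print('tick')

scheduler = BackgroundScheduler()
# a stable id lets the job be fetched later via scheduler.get_job('HEARTBEAT')
scheduler.add_job(heartbeat, IntervalTrigger(seconds=5),
                  name='HEARTBEAT', id='HEARTBEAT')
scheduler.start()
time.sleep(12)  # let a couple of ticks fire
scheduler.shutdown()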
Example 13
    def __init__(self):
        self.started = False
        self.io_loop = IOLoop.current()

        # process id
        self.PID = os.getpid()

        # generate notifiers dict
        self.notifiersDict = notifiersDict()

        # generate metadata providers dict
        self.metadataProvidersDict = metadataProvidersDict()

        # generate providers dict
        self.providersDict = providersDict()

        # init notification queue
        self.srNotifications = Notifications()

        # init logger
        self.srLogger = srLogger()

        # init config
        self.srConfig = srConfig()

        # init databases
        self.mainDB = MainDB()
        self.cacheDB = CacheDB()
        self.failedDB = FailedDB()

        # init scheduler service
        self.srScheduler = TornadoScheduler()

        # init web server
        self.srWebServer = srWebServer()

        # init web client session
        self.srWebSession = srSession()

        # google api
        self.googleAuth = googleAuth()

        # name cache
        self.NAMECACHE = srNameCache()

        # queues
        self.SHOWQUEUE = srShowQueue()
        self.SEARCHQUEUE = srSearchQueue()

        # updaters
        self.VERSIONUPDATER = srVersionUpdater()
        self.SHOWUPDATER = srShowUpdater()

        # searchers
        self.DAILYSEARCHER = srDailySearcher()
        self.BACKLOGSEARCHER = srBacklogSearcher()
        self.PROPERSEARCHER = srProperSearcher()
        self.TRAKTSEARCHER = srTraktSearcher()
        self.SUBTITLESEARCHER = srSubtitleSearcher()

        # auto postprocessor
        self.AUTOPOSTPROCESSOR = srPostProcessor()

        # sickrage version
        self.NEWEST_VERSION = None
        self.NEWEST_VERSION_STRING = None

        # anidb connection
        self.ADBA_CONNECTION = None

        # show list
        self.SHOWLIST = []
Example 14
    def __init__(self):
        self.started = False
        self.io_loop = IOLoop.current()

        # process id
        self.PID = os.getpid()

        # cpu count
        self.CPU_COUNT = cpu_count()

        # generate notifiers dict
        self.notifiersDict = AttrDict(libnotify=LibnotifyNotifier(),
                                      kodi_notifier=KODINotifier(),
                                      plex_notifier=PLEXNotifier(),
                                      emby_notifier=EMBYNotifier(),
                                      nmj_notifier=NMJNotifier(),
                                      nmjv2_notifier=NMJv2Notifier(),
                                      synoindex_notifier=synoIndexNotifier(),
                                      synology_notifier=synologyNotifier(),
                                      pytivo_notifier=pyTivoNotifier(),
                                      growl_notifier=GrowlNotifier(),
                                      prowl_notifier=ProwlNotifier(),
                                      libnotify_notifier=LibnotifyNotifier(),
                                      pushover_notifier=PushoverNotifier(),
                                      boxcar_notifier=BoxcarNotifier(),
                                      boxcar2_notifier=Boxcar2Notifier(),
                                      nma_notifier=NMA_Notifier(),
                                      pushalot_notifier=PushalotNotifier(),
                                      pushbullet_notifier=PushbulletNotifier(),
                                      freemobile_notifier=FreeMobileNotifier(),
                                      twitter_notifier=TwitterNotifier(),
                                      trakt_notifier=TraktNotifier(),
                                      email_notifier=EmailNotifier())

        # generate metadata providers dict
        self.metadataProviderDict = get_metadata_generator_dict()

        # generate providers dict
        self.providersDict = providersDict()

        # init notification queue
        self.srNotifications = Notifications()

        # init logger
        self.srLogger = srLogger()

        # init config
        self.srConfig = srConfig()

        # init databases
        self.mainDB = MainDB()
        self.cacheDB = CacheDB()
        self.failedDB = FailedDB()

        # init scheduler service
        self.srScheduler = TornadoScheduler()

        # init web server
        self.srWebServer = srWebServer()

        # init web client session
        self.srWebSession = srSession()

        # google api
        self.googleAuth = googleAuth()

        # name cache
        self.NAMECACHE = srNameCache()

        # queues
        self.SHOWQUEUE = srShowQueue()
        self.SEARCHQUEUE = srSearchQueue()

        # updaters
        self.VERSIONUPDATER = srVersionUpdater()
        self.SHOWUPDATER = srShowUpdater()

        # searchers
        self.DAILYSEARCHER = srDailySearcher()
        self.BACKLOGSEARCHER = srBacklogSearcher()
        self.PROPERSEARCHER = srProperSearcher()
        self.TRAKTSEARCHER = srTraktSearcher()
        self.SUBTITLESEARCHER = srSubtitleSearcher()

        # auto postprocessor
        self.AUTOPOSTPROCESSOR = srPostProcessor()

        # sickrage version
        self.NEWEST_VERSION = None
        self.NEWEST_VERSION_STRING = None

        # anidb connection
        self.ADBA_CONNECTION = None

        # show list
        self.SHOWLIST = []
Example 15
    def __init__(self):
        self.started = False

        # process id
        self.PID = os.getpid()

        # generate notifiers dict
        self.notifiersDict = notifiersDict()

        # generate metadata providers dict
        self.metadataProvidersDict = metadataProvidersDict()

        # generate providers dict
        self.providersDict = providersDict()

        # init notification queue
        self.srNotifications = Notifications()

        # init logger
        self.srLogger = srLogger()

        # init config
        self.srConfig = srConfig()

        # init databases
        self.mainDB = MainDB()
        self.cacheDB = CacheDB()
        self.failedDB = FailedDB()

        # init scheduler service
        self.srScheduler = BackgroundScheduler()

        # init web server
        self.srWebServer = srWebServer()

        # init web client session
        self.srWebSession = srSession()

        # google api
        self.googleAuth = googleAuth()

        # name cache
        self.NAMECACHE = srNameCache()

        # queues
        self.SHOWQUEUE = srShowQueue()
        self.SEARCHQUEUE = srSearchQueue()

        # updaters
        self.VERSIONUPDATER = srVersionUpdater()
        self.SHOWUPDATER = srShowUpdater()

        # searchers
        self.DAILYSEARCHER = srDailySearcher()
        self.BACKLOGSEARCHER = srBacklogSearcher()
        self.PROPERSEARCHER = srProperSearcher()
        self.TRAKTSEARCHER = srTraktSearcher()
        self.SUBTITLESEARCHER = srSubtitleSearcher()

        # auto postprocessor
        self.AUTOPOSTPROCESSOR = srPostProcessor()

        # sickrage version
        self.NEWEST_VERSION = None
        self.NEWEST_VERSION_STRING = None

        # anidb connection
        self.ADBA_CONNECTION = None

        # show list
        self.SHOWLIST = []

        self.USER_AGENT = 'SiCKRAGE.CE.1/({};{};{})'.format(
            platform.system(), platform.release(), str(uuid.uuid1()))

        self.SYS_ENCODING = get_sys_encoding()

        # patch modules with encoding kludge
        patch_modules()