Example #1
    def delete(self, show_id, season_id, session):
        """ Deletes all season releases by show ID and season ID """
        try:
            db.show_by_id(show_id, session=session)
        except NoResultFound:
            raise NotFoundError('show with ID %s not found' % show_id)
        try:
            season = db.season_by_id(season_id, session)
        except NoResultFound:
            raise NotFoundError('season with ID %s not found' % season_id)
        if not db.season_in_show(show_id, season_id):
            raise BadRequest('season with id %s does not belong to show %s' %
                             (season_id, show_id))

        args = release_delete_parser.parse_args()
        downloaded = args.get('downloaded') is True if args.get(
            'downloaded') is not None else None
        release_items = []
        for release in season.releases:
            if (downloaded and release.downloaded
                    or downloaded is False and not release.downloaded
                    or not downloaded):
                release_items.append(release)

        for release in release_items:
            if args.get('forget'):
                fire_event('forget', release.title)
            db.delete_season_release_by_id(release.id)
        return success_response(
            'successfully deleted all releases for season %s from show %s' %
            (season_id, show_id))
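
The fire_event('forget', release.title) calls above are dispatched to whatever handlers are registered under the 'forget' event name. A minimal handler sketch, assuming the @event decorator from flexget.event; the handler name and log message are illustrative only:

import logging

from flexget.event import event

log = logging.getLogger('example')


@event('forget')
def on_forget(value):
    # Receives the same positional argument passed to fire_event('forget', release.title).
    # A real handler (e.g. one clearing 'seen' records) would purge matching data here.
    log.info('forget requested for %s', value)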
Example #2
    def __run_task_phase(self, phase):
        """Executes a task phase, i.e. calls all enabled plugins on the task.

        Fires events:

        * task.execute.before_plugin
        * task.execute.after_plugin

        :param string phase: Name of the phase
        """
        if phase not in phase_methods:
            raise Exception('%s is not a valid task phase' % phase)
        # warn if no inputs, filters or outputs in the task
        if phase in ['input', 'filter', 'output']:
            if not self.manager.unit_test:
                # Check that there is at least one manually configured plugin for these phases
                for p in self.plugins(phase):
                    if not p.builtin:
                        break
                else:
                    if phase not in self.suppress_warnings:
                        if phase == 'filter':
                            log.warning('Task does not have any filter plugins to accept entries. '
                                        'You need at least one to accept the entries you want.')
                        else:
                            log.warning('Task doesn\'t have any %s plugins, you should add (at least) one!' % phase)

        for plugin in self.plugins(phase):
            # Abort this phase if one of the plugins disables it
            if phase in self.disabled_phases:
                return
            # store execute info, except during entry events
            self.current_phase = phase
            self.current_plugin = plugin.name

            if plugin.api_ver == 1:
                # backwards compatibility:
                # pass the method only the task (old behaviour)
                args = (self,)
            else:
                # pass the method the task and a copy of its config (so the plugin cannot modify it)
                args = (self, copy.copy(self.config.get(plugin.name)))

            # Hack to make task.session only active for a single plugin
            with Session() as session:
                self.session = session
                try:
                    fire_event('task.execute.before_plugin', self, plugin.name)
                    response = self.__run_plugin(plugin, phase, args)
                    if phase == 'input' and response:
                        # add entries returned by input to self.all_entries
                        for e in response:
                            e.task = self
                        self.all_entries.extend(response)
                finally:
                    fire_event('task.execute.after_plugin', self, plugin.name)
                self.session = None
        # check config hash for changes at the end of 'prepare' phase
        if phase == 'prepare':
            self.check_config_hash()
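
The two per-plugin events fired in the loop above receive the task and the plugin name as positional arguments, so a subscriber mirrors that signature. A hedged sketch, again assuming the @event decorator from flexget.event:

import logging

from flexget.event import event

log = logging.getLogger('example')


@event('task.execute.before_plugin')
def before_plugin(task, plugin_name):
    # Mirrors fire_event('task.execute.before_plugin', self, plugin.name).
    log.debug('task %s entering plugin %s', task.name, plugin_name)


@event('task.execute.after_plugin')
def after_plugin(task, plugin_name):
    # Runs even when the plugin raises, because the firing call sits in a finally block.
    log.debug('task %s finished plugin %s', task.name, plugin_name)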
Example #3
    def delete(self, show_id, ep_id, rel_id, session):
        """ Delete episode release by show ID, episode ID and release ID """
        try:
            db.show_by_id(show_id, session=session)
        except NoResultFound:
            raise NotFoundError('show with ID %s not found' % show_id)
        try:
            db.episode_by_id(ep_id, session)
        except NoResultFound:
            raise NotFoundError('episode with ID %s not found' % ep_id)
        try:
            release = db.episode_release_by_id(rel_id, session)
        except NoResultFound:
            raise NotFoundError('release with ID %s not found' % rel_id)
        if not db.episode_in_show(show_id, ep_id):
            raise BadRequest('episode with id %s does not belong to show %s' %
                             (ep_id, show_id))
        if not db.release_in_episode(ep_id, rel_id):
            raise BadRequest('release id %s does not belong to episode %s' %
                             (rel_id, ep_id))
        args = delete_parser.parse_args()
        if args.get('forget'):
            fire_event('forget', release.title)

        db.delete_episode_release_by_id(rel_id)
        return success_response(
            'successfully deleted release %d from episode %d' %
            (rel_id, ep_id))
Example #4
    def daemon_command(self, options: argparse.Namespace) -> None:
        """
        Handles the 'daemon' CLI command.

        Fires events:

        * manager.daemon.started
        * manager.daemon.completed

        :param options: argparse options
        """

        # Import API so it can register to daemon.started event
        if options.action == 'start':
            if self.is_daemon:
                logger.error('Daemon already running for this config.')
                return
            elif self.task_queue.is_alive():
                logger.error(
                    'Non-daemon execution of FlexGet is running. Cannot start daemon until it is finished.'
                )
                return
            if options.daemonize:
                self.daemonize()
            if options.autoreload_config:
                self.autoreload_config = True
            try:
                signal.signal(signal.SIGTERM, self._handle_sigterm)
            except ValueError as e:
                # If flexget is being called from another script, e.g. windows service helper, and we are not the
                # main thread, this error will occur.
                logger.debug('Error registering sigterm handler: {}', e)
            self.is_daemon = True
            fire_event('manager.daemon.started', self)
            self.task_queue.start()
            self.ipc_server.start()
            self.task_queue.wait()
            fire_event('manager.daemon.completed', self)
        elif options.action in ['stop', 'reload-config', 'status']:
            if not self.is_daemon:
                logger.error('There does not appear to be a daemon running.')
                return
            if options.action == 'status':
                logger.info('Daemon running. (PID: {})', os.getpid())
            elif options.action == 'stop':
                tasks = ('all queued tasks (if any) have' if options.wait else
                         'currently running task (if any) has')
                logger.info(
                    'Daemon shutdown requested. Shutdown will commence when {} finished executing.',
                    tasks,
                )
                self.shutdown(options.wait)
            elif options.action == 'reload-config':
                logger.info('Reloading config from disk.')
                try:
                    self.load_config()
                except ValueError as e:
                    logger.error('Error loading config: {}', e.args[0])
                else:
                    logger.info('Config successfully reloaded from disk.')
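
The comment 'Import API so it can register to daemon.started event' points at the consuming side: importing a module binds its handlers before fire_event('manager.daemon.started', self) runs. A hedged sketch of such a handler, assuming the @event decorator from flexget.event; the log message is illustrative only:

import logging

from flexget.event import event

log = logging.getLogger('example')


@event('manager.daemon.started')
def on_daemon_started(manager):
    # Receives the Manager instance passed to fire_event('manager.daemon.started', self).
    # A real consumer (such as the web API) would start its own services here.
    log.info('daemon started for config %s', manager.config_name)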
Example #5
    def acquire_lock(self, event=True):
        """
        :param bool event: If True, the 'manager.lock_acquired' event will be fired after a lock is obtained
        """
        acquired = False
        try:
            # Don't do anything if we already have a lock. This means only the outermost call will release the lock file
            if not self._has_lock:
                # Exit if there is an existing lock.
                if self.check_lock():
                    with io.open(self.lockfile, encoding='utf-8') as f:
                        pid = f.read()
                    print('Another process (%s) is running, will exit.' % pid.split('\n')[0], file=sys.stderr)
                    print('If you\'re sure there is no other instance running, delete %s' % self.lockfile,
                          file=sys.stderr)
                    sys.exit(1)

                self._has_lock = True
                self.write_lock()
                acquired = True
                if event:
                    fire_event('manager.lock_acquired', self)
            yield
        finally:
            if acquired:
                self.release_lock()
                self._has_lock = False
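
acquire_lock yields mid-body and is entered with a with-statement in Example #11, which suggests it is wrapped with contextlib.contextmanager in the full source. A usage sketch under that assumption; run_locked and its manager argument are illustrative only:

def run_locked(manager):
    # Assumes acquire_lock() is a contextlib.contextmanager-decorated generator,
    # as its with-statement usage in Example #11 suggests.
    with manager.acquire_lock(event=True):
        # 'manager.lock_acquired' has been fired by now (outermost call only);
        # the lock file is released in the generator's finally block on exit.
        manager.task_queue.start()
        manager.task_queue.wait()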
Example #6
    def shutdown(self, finish_queue=True):
        """
        Application is exiting.

        :param bool finish_queue: Should scheduler finish the task queue
        """
        # Wait for scheduler to finish
        self.scheduler.shutdown(finish_queue=finish_queue)
        try:
            self.scheduler.wait()
        except KeyboardInterrupt:
            log.debug('Not waiting for scheduler shutdown due to ctrl-c')
            # show real stack trace in debug mode
            if manager.options.debug:
                raise
            print('**** Keyboard Interrupt ****')
        fire_event('manager.shutdown', self)
        if not self.unit_test:  # don't scroll "nosetests" summary results when logging is enabled
            log.debug('Shutting down')
        self.engine.dispose()
        # remove temporary database used in test mode
        if self.options.test:
            if 'test' not in self.db_filename:
                raise Exception('trying to delete non test database?')
            if self._has_lock:
                os.remove(self.db_filename)
                log.info('Removed test database')
        if not self.unit_test:  # don't scroll "nosetests" summary results when logging is enabled
            log.debug('Shutdown completed')
Example #7
    def delete(self, show_id, ep_id, session):
        """ Forgets episode by show ID and episode ID """
        try:
            show = series.show_by_id(show_id, session=session)
        except NoResultFound:
            return {
                'status': 'error',
                'message': 'Show with ID %s not found' % show_id
            }, 404
        try:
            episode = series.episode_by_id(ep_id, session)
        except NoResultFound:
            return {
                'status': 'error',
                'message': 'Episode with ID %s not found' % ep_id
            }, 414
        if not series.episode_in_show(show_id, ep_id):
            return {
                'status':
                'error',
                'message':
                'Episode with id %s does not belong to show %s' %
                (ep_id, show_id)
            }, 400

        args = delete_parser.parse_args()
        if args.get('delete_seen'):
            for release in episode.releases:
                fire_event('forget', release.title)

        series.forget_episodes_by_id(show_id, ep_id)
        return {}
Example #8
    def __init__(self, options):
        global manager
        assert not manager, "Only one instance of Manager should be created at a time!"
        manager = self
        self.options = options
        self.config_base = None
        self.config_name = None
        self.db_filename = None
        self.engine = None
        self.lockfile = None

        self.config = {}
        self.feeds = {}

        # shelve (FlexGet 0.9.x)
        self.shelve_session = None

        self.initialize()

        # cannot be imported at module level because of circular references
        from flexget.utils.simple_persistence import SimplePersistence

        self.persist = SimplePersistence("manager")

        log.debug("sys.defaultencoding: %s" % sys.getdefaultencoding())
        log.debug("sys.getfilesystemencoding: %s" % sys.getfilesystemencoding())
        log.debug("os.path.supports_unicode_filenames: %s" % os.path.supports_unicode_filenames)

        atexit.register(self.shutdown)

        fire_event("manager.upgrade", self)
        fire_event("manager.startup", self)
        self.db_cleanup()
Example #9
 def run_daemon():
     fire_event('manager.daemon.started', self)
     self.task_queue.start()
     self.ipc_server.start()
     self.task_queue.wait()
     fire_event('manager.daemon.completed', self)
     tray_icon.stop()
Example #10
    def daemon_command(self, options):
        """
        Handles the 'daemon' CLI command.

        Fires events:

        * manager.daemon.started
        * manager.daemon.completed

        :param options: argparse options
        """

        # Import API so it can register to daemon.started event
        if options.action == 'start':
            if self.is_daemon:
                log.error('Daemon already running for this config.')
                return
            elif self.task_queue.is_alive():
                log.error('Non-daemon execution of FlexGet is running. Cannot start daemon until it is finished.')
                return
            if options.daemonize:
                self.daemonize()
            if options.config_autoreload:
                self.config_autoreload = True
            try:
                signal.signal(signal.SIGTERM, self._handle_sigterm)
            except ValueError as e:
                # If flexget is being called from another script, e.g. windows service helper, and we are not the
                # main thread, this error will occur.
                log.debug('Error registering sigterm handler: %s' % e)
            self.is_daemon = True
            fire_event('manager.daemon.started', self)
            self.task_queue.start()
            self.ipc_server.start()
            self.task_queue.wait()
            fire_event('manager.daemon.completed', self)
        elif options.action in ['stop', 'reload', 'status', 'enable-autoreload', 'disable-autoreload']:
            if not self.is_daemon:
                log.error('There does not appear to be a daemon running.')
                return
            if options.action == 'status':
                log.info('Daemon running. (PID: %s)' % os.getpid())
            elif options.action == 'stop':
                tasks = 'all queued tasks (if any) have' if options.wait else 'currently running task (if any) has'
                log.info('Daemon shutdown requested. Shutdown will commence when %s finished executing.' % tasks)
                self.shutdown(options.wait)
            elif options.action == 'reload':
                log.info('Reloading config from disk.')
                try:
                    self.load_config()
                except ValueError as e:
                    log.error('Error loading config: %s' % e.args[0])
                else:
                    log.info('Config successfully reloaded from disk.')
            elif options.action == 'enable-autoreload':
                log.info('Enabled automatic config reloading')
                self.config_autoreload = True
            elif options.action == 'disable-autoreload':
                log.info('Disabled automatic config reloading')
                self.config_autoreload = False
Example #11
    def execute_command(self, options):
        """
        Send execute command to daemon through IPC or perform execution
        on current process.

        Fires events:

        * manager.execute.completed

        :param options: argparse options
        """
        # If a daemon is started, send the execution to the daemon
        ipc_info = self.check_ipc_info()
        if ipc_info:
            try:
                log.info(
                    'There is a daemon running for this config. Sending execution to running daemon.'
                )
                client = IPCClient(ipc_info['port'], ipc_info['password'])
            except ValueError as e:
                log.error(e)
            else:
                client.execute(dict(options, loglevel=self.options.loglevel))
            self.shutdown()
            return
        # Otherwise we run the execution ourselves
        with self.acquire_lock():
            fire_event('manager.execute.started', self)
            self.task_queue.start()
            self.execute(options)
            self.shutdown(finish_queue=True)
            self.task_queue.wait()
            fire_event('manager.execute.completed', self)
Example #12
    def initialize(self):
        """
        Separated from __init__ so that unit tests can modify options before loading config.

        :raises: `IOError` if config is not found. `ValueError` if config is malformed.
        """
        self.setup_yaml()
        try:
            self.find_config(create=(self.options.cli_command == 'webui'))
        except:
            logger.start(level=self.options.loglevel.upper(), to_file=False)
            raise
        else:
            log_file = os.path.expanduser(manager.options.logfile)
            # If an absolute path is not specified, use the config directory.
            if not os.path.isabs(log_file):
                log_file = os.path.join(self.config_base, log_file)
            logger.start(log_file,
                         self.options.loglevel.upper(),
                         to_console=not self.options.execute.cron)

        self.init_sqlalchemy()
        fire_event('manager.initialize', self)
        try:
            self.load_config()
        except ValueError as e:
            log.critical('Failed to load config file: %s' % e.args[0])
            raise
Example #13
    def delete(self, show_id, ep_id, session):
        """ Forgets episode by show ID and episode ID """
        try:
            show = series.show_by_id(show_id, session=session)
        except NoResultFound:
            return {'status': 'error',
                    'message': 'Show with ID %s not found' % show_id
                    }, 404
        try:
            episode = series.episode_by_id(ep_id, session)
        except NoResultFound:
            return {'status': 'error',
                    'message': 'Episode with ID %s not found' % ep_id
                    }, 414
        if not series.episode_in_show(show_id, ep_id):
            return {'status': 'error',
                    'message': 'Episode with id %s does not belong to show %s' % (ep_id, show_id)}, 400

        args = delete_parser.parse_args()
        if args.get('delete_seen'):
            for release in episode.releases:
                fire_event('forget', release.title)

        series.forget_episodes_by_id(show_id, ep_id)
        return {}
Example #14
    def execute_command(self, options):
        """
        Handles the 'execute' CLI command.

        If there is already a task queue running in this process, adds the execution to the queue.
        If FlexGet is being invoked with this command, starts up a task queue and runs the execution.

        Fires events:

        * manager.execute.started
        * manager.execute.completed

        :param options: argparse options
        """
        fire_event('manager.execute.started', self, options)
        if self.task_queue.is_alive():
            if len(self.task_queue):
                log.verbose(
                    'There is a task already running, execution queued.')
            finished_events = self.execute(
                options,
                output=logger.get_capture_stream(),
                loglevel=logger.get_capture_loglevel())
            if not options.cron:
                # Wait until execution of all tasks has finished
                for _, _, event in finished_events:
                    event.wait()
        else:
            self.task_queue.start()
            self.ipc_server.start()
            self.execute(options)
            self.shutdown(finish_queue=True)
            self.task_queue.wait()
        fire_event('manager.execute.completed', self, options)
Example #15
    def execute(self):
        """
        Executes the task.

        If :attr:`.enabled` is False task is not executed. Certain :attr:`.options`
        affect how execution is handled.

        - :attr:`.options.disable_phases` is a list of phases that are not enabled
          for this execution.
        - :attr:`.options.inject` is a list of :class:`Entry` instances used instead
          of running input phase.
        """

        try:
            if self.options.cron:
                self.manager.db_cleanup()
            fire_event('task.execute.started', self)
            while True:
                self._execute()
                # rerun task
                if self._rerun and self._rerun_count < self.max_reruns:
                    log.info('Rerunning the task in case better resolution can be achieved.')
                    self._rerun_count += 1
                    # TODO: Potential optimization is to take snapshots (maybe make the ones backlog uses built in
                    # instead of taking another one) after input and just inject the same entries for the rerun
                    self._all_entries = EntryContainer()
                    self._rerun = False
                    continue
                elif self._rerun:
                    log.info('Task has been re-run %s times already, stopping for now' % self._rerun_count)
                break
            fire_event('task.execute.completed', self)
        finally:
            self.finished_event.set()
Example #16
 def config(self, request):
     os.environ['FLEXGET_PLUGIN_PATH'] = request.fspath.dirpath().join('external_plugins').strpath
     plugin.load_plugins()
     # fire the config register event again so that task schema is rebuilt with new plugin
     fire_event('config.register')
     yield self._config
     del os.environ['FLEXGET_PLUGIN_PATH']
Example #17
    def acquire_lock(self, event=True):
        """
        :param bool event: If True, the 'manager.lock_acquired' event will be fired after a lock is obtained
        """
        acquired = False
        try:
            # Don't do anything if we already have a lock. This means only the outermost call will release the lock file
            if not self._has_lock:
                # Exit if there is an existing lock.
                if self.check_lock():
                    with io.open(self.lockfile, encoding='utf-8') as f:
                        pid = f.read()
                    print('Another process (%s) is running, will exit.' %
                          pid.split('\n')[0],
                          file=sys.stderr)
                    print(
                        'If you\'re sure there is no other instance running, delete %s'
                        % self.lockfile,
                        file=sys.stderr)
                    sys.exit(1)

                self._has_lock = True
                self.write_lock()
                acquired = True
                if event:
                    fire_event('manager.lock_acquired', self)
            yield
        finally:
            if acquired:
                self.release_lock()
                self._has_lock = False
Example #18
    def db_cleanup(self, force: bool = False) -> None:
        """
        Perform database cleanup if cleanup interval has been met.

        Fires events:

        * manager.db_cleanup

          Fired only if the interval has been met; the session to use for the cleanup is passed as a parameter.

        :param bool force: Run the cleanup regardless of whether the interval has been met.
        """
        expired = (self.persist.get('last_cleanup', datetime(1900, 1, 1)) <
                   datetime.now() - DB_CLEANUP_INTERVAL)
        if force or expired:
            logger.info('Running database cleanup.')
            with Session() as session:
                fire_event('manager.db_cleanup', self, session)
            # Try to VACUUM after cleanup
            fire_event('manager.db_vacuum', self)
            # Just in case some plugin was overzealous in its cleaning, mark the config changed
            self.config_changed()
            self.persist['last_cleanup'] = datetime.now()
        else:
            logger.debug('Not running db cleanup, last run {}',
                         self.persist.get('last_cleanup'))
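
Handlers registered for 'manager.db_cleanup' receive the manager and the open session created in the with-block above and are expected to delete their own stale rows through it. A hedged sketch, assuming the @event decorator from flexget.event; the query in the comment is a placeholder:

import logging

from flexget.event import event

log = logging.getLogger('example')


@event('manager.db_cleanup')
def purge_stale_rows(manager, session):
    # Receives the Manager and the SQLAlchemy session opened in db_cleanup();
    # a real handler would issue its deletes through this session, e.g.
    # session.query(MyTable).filter(MyTable.added < cutoff).delete().
    log.debug('db cleanup hook invoked')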
Example #19
    def config_changed(self) -> None:
        """Ensures that all tasks will see the config_modified flag as True on their next run.
        Useful when changing the db and all tasks need to be completely reprocessed."""
        from flexget.task import config_changed

        config_changed()
        fire_event('manager.config_updated', self)
Example #20
    def db_cleanup(self, force=False):
        """
        Perform database cleanup if cleanup interval has been met.

        Fires events:

        * manager.db_cleanup

          Fired only if the interval has been met; the session to use for the cleanup is passed as a parameter.

        :param bool force: Run the cleanup regardless of whether the interval has been met.
        """
        expired = self.persist.get('last_cleanup', datetime(
            1900, 1, 1)) < datetime.now() - DB_CLEANUP_INTERVAL
        if force or expired:
            log.info('Running database cleanup.')
            session = Session()
            try:
                fire_event('manager.db_cleanup', self, session)
                session.commit()
            finally:
                session.close()
            # Just in case some plugin was overzealous in its cleaning, mark the config changed
            self.config_changed()
            self.persist['last_cleanup'] = datetime.now()
        else:
            log.debug('Not running db cleanup, last run %s' %
                      self.persist.get('last_cleanup'))
Example #21
 def config(self, request):
     os.environ['FLEXGET_PLUGIN_PATH'] = request.fspath.dirpath().join('external_plugins').strpath
     plugin.load_plugins()
     # fire the config register event again so that task schema is rebuilt with new plugin
     fire_event('config.register')
     yield self._config
     del os.environ['FLEXGET_PLUGIN_PATH']
Example #22
    def execute(self):
        """
        Executes the task.

        If :attr:`.enabled` is False task is not executed. Certain :attr:`.options`
        affect how execution is handled.

        - :attr:`.options.disable_phases` is a list of phases that are not enabled
          for this execution.
        - :attr:`.options.inject` is a list of :class:`Entry` instances used instead
          of running input phase.
        """

        try:
            self.finished_event.clear()
            if self.options.cron:
                self.manager.db_cleanup()
            fire_event('task.execute.started', self)
            while True:
                self._execute()
                # rerun task
                if self._rerun and self._rerun_count < self.max_reruns and self._rerun_count < Task.RERUN_MAX:
                    log.info('Rerunning the task in case better resolution can be achieved.')
                    self._rerun_count += 1
                    # TODO: Potential optimization is to take snapshots (maybe make the ones backlog uses built in
                    # instead of taking another one) after input and just inject the same entries for the rerun
                    self._all_entries = EntryContainer()
                    self._rerun = False
                    continue
                elif self._rerun:
                    log.info('Task has been re-run %s times already, stopping for now' % self._rerun_count)
                break
            fire_event('task.execute.completed', self)
        finally:
            self.finished_event.set()
Example #23
    def initialize(self):
        """
        Separated from __init__ so that unit tests can modify options before loading config.

        :raises: `IOError` if config is not found. `ValueError` if config is malformed.
        """
        self.setup_yaml()
        try:
            self.find_config(create=(self.options.cli_command == 'webui'))
        except:
            logger.start(level=self.options.loglevel.upper(), to_file=False)
            raise
        else:
            log_file = os.path.expanduser(manager.options.logfile)
            # If an absolute path is not specified, use the config directory.
            if not os.path.isabs(log_file):
                log_file = os.path.join(self.config_base, log_file)
            logger.start(log_file, self.options.loglevel.upper(), to_console=not self.options.execute.cron)

        self.init_sqlalchemy()
        fire_event('manager.initialize', self)
        try:
            self.load_config()
        except ValueError as e:
            log.critical('Failed to load config file: %s' % e.args[0])
            raise
Example #24
    def delete(self, show_id, ep_id, session):
        """ Deletes all episode releases by show ID and episode ID """
        try:
            series.show_by_id(show_id, session=session)
        except NoResultFound:
            raise NotFoundError('show with ID %s not found' % show_id)
        try:
            episode = series.episode_by_id(ep_id, session)
        except NoResultFound:
            raise NotFoundError('episode with ID %s not found' % ep_id)
        if not series.episode_in_show(show_id, ep_id):
            raise BadRequest('episode with id %s does not belong to show %s' % (ep_id, show_id))

        args = release_delete_parser.parse_args()
        downloaded = args.get('downloaded') is True if args.get('downloaded') is not None else None
        release_items = []
        for release in episode.releases:
            if downloaded and release.downloaded or downloaded is False and not release.downloaded or not downloaded:
                release_items.append(release)

        for release in release_items:
            if args.get('forget'):
                fire_event('forget', release.title)
            series.delete_release_by_id(release.id)
        return success_response('successfully deleted all releases for episode %s from show %s' % (ep_id, show_id))
Example #25
def load_plugins(extra_dirs=None):
    """
    Load plugins from the standard plugin paths.
    :param list extra_dirs: Extra directories from where plugins are loaded.
    """
    global plugins_loaded

    if not extra_dirs:
        extra_dirs = []

    # Add flexget.plugins directory (core plugins)
    extra_dirs.extend(_get_standard_plugins_path())

    start_time = time.time()
    # Import all the plugins
    _load_plugins_from_dirs(extra_dirs)
    _load_plugins_from_packages()
    # Register them
    fire_event('plugin.register')
    # Plugins should only be registered once, remove their handlers after
    remove_event_handlers('plugin.register')
    # After they have all been registered, instantiate them
    for plugin in list(plugins.values()):
        plugin.initialize()
    took = time.time() - start_time
    plugins_loaded = True
    log.debug('Plugins took %.2f seconds to load. %s plugins in registry.',
              took, len(plugins.keys()))
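
The 'plugin.register' event fired here is the hook individual plugin modules use to add themselves to the registry; its handlers are removed immediately afterwards so registration happens exactly once. A sketch of the conventional shape, hedged: MyExamplePlugin and its name are placeholders, and plugin.register is assumed to accept a class, a name and api_ver.

from flexget import plugin
from flexget.event import event


class MyExamplePlugin:
    """Placeholder plugin; a real one defines a schema and phase handlers."""

    schema = {'type': 'boolean'}

    def on_task_filter(self, task, config):
        pass


@event('plugin.register')
def register_plugin():
    # Invoked once by load_plugins(); the handler set for 'plugin.register'
    # is removed right after the event fires.
    plugin.register(MyExamplePlugin, 'my_example_plugin', api_ver=2)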
Example #26
    def db_cleanup(self, force=False):
        """
        Perform database cleanup if cleanup interval has been met.

        Fires events:

        * manager.db_cleanup

          Fired only if the interval has been met; the session to use for the cleanup is passed as a parameter.

        :param bool force: Run the cleanup regardless of whether the interval has been met.
        """
        expired = self.persist.get('last_cleanup', datetime(1900, 1, 1)) < datetime.now() - DB_CLEANUP_INTERVAL
        if force or expired:
            log.info('Running database cleanup.')
            session = Session()
            try:
                fire_event('manager.db_cleanup', self, session)
                session.commit()
            finally:
                session.close()
            # Just in case some plugin was overzealous in its cleaning, mark the config changed
            self.config_changed()
            self.persist['last_cleanup'] = datetime.now()
        else:
            log.debug('Not running db cleanup, last run %s' % self.persist.get('last_cleanup'))
Example #27
    def delete(self, show_id, season_id, session):
        """ Deletes all season releases by show ID and season ID """
        try:
            db.show_by_id(show_id, session=session)
        except NoResultFound:
            raise NotFoundError('show with ID %s not found' % show_id)
        try:
            season = db.season_by_id(season_id, session)
        except NoResultFound:
            raise NotFoundError('season with ID %s not found' % season_id)
        if not db.season_in_show(show_id, season_id):
            raise BadRequest('season with id %s does not belong to show %s' % (season_id, show_id))

        args = release_delete_parser.parse_args()
        downloaded = args.get('downloaded') is True if args.get('downloaded') is not None else None
        release_items = []
        for release in season.releases:
            if (
                downloaded
                and release.downloaded
                or downloaded is False
                and not release.downloaded
                or not downloaded
            ):
                release_items.append(release)

        for release in release_items:
            if args.get('forget'):
                fire_event('forget', release.title)
            db.delete_season_release_by_id(release.id)
        return success_response(
            'successfully deleted all releases for season %s from show %s' % (season_id, show_id)
        )
Example #28
    def __init__(self, options):
        """
        :param options: optparse parsed options object
        """
        global manager
        assert not manager, 'Only one instance of Manager should be created at a time!'
        manager = self
        self.options = options
        self.config_base = None
        self.config_name = None
        self.db_filename = None
        self.engine = None
        self.lockfile = None
        self.database_uri = None

        self.config = {}
        self.feeds = {}

        self.initialize()

        # cannot be imported at module level because of circular references
        from flexget.utils.simple_persistence import SimplePersistence
        self.persist = SimplePersistence('manager')

        log.debug('sys.defaultencoding: %s' % sys.getdefaultencoding())
        log.debug('sys.getfilesystemencoding: %s' % sys.getfilesystemencoding())
        log.debug('os.path.supports_unicode_filenames: %s' % os.path.supports_unicode_filenames)

        fire_event('manager.upgrade', self)
        fire_event('manager.startup', self)
        self.db_cleanup()
Example #29
 def config_changed(self):
     """Ensures that all tasks will see the config_modified flag as True on their next run.
     Useful when changing the db and all tasks need to be completely reprocessed."""
     from flexget.task import config_changed
     for task in self.tasks:
         config_changed(task)
     fire_event('manager.config_updated', self)
Example #30
    def execute_command(self, options):
        """
        Send execute command to daemon through IPC or perform execution
        on current process.

        Fires events:

        * manager.execute.completed

        :param options: argparse options
        """
        # If a daemon is started, send the execution to the daemon
        ipc_info = self.check_ipc_info()
        if ipc_info:
            try:
                log.info('There is a daemon running for this config. Sending execution to running daemon.')
                client = IPCClient(ipc_info['port'], ipc_info['password'])
            except ValueError as e:
                log.error(e)
            else:
                client.execute(dict(options, loglevel=self.options.loglevel))
            self.shutdown()
            return
        # Otherwise we run the execution ourselves
        with self.acquire_lock():
            fire_event('manager.execute.started', self)
            self.task_queue.start()
            self.execute(options)
            self.shutdown(finish_queue=True)
            self.task_queue.wait()
            fire_event('manager.execute.completed', self)
Example #31
    def __run_task_phase(self, phase):
        """Executes a task phase, i.e. calls all enabled plugins on the task.

        Fires events:

        * task.execute.before_plugin
        * task.execute.after_plugin

        :param string phase: Name of the phase
        """
        if phase not in phase_methods:
            raise Exception('%s is not a valid task phase' % phase)
        # warn if no inputs, filters or outputs in the task
        if phase in ['input', 'filter', 'output']:
            if not self.manager.unit_test:
                # Check that there is at least one manually configured plugin for these phases
                for p in self.plugins(phase):
                    if not p.builtin:
                        break
                else:
                    if phase == 'filter':
                        log.warning(
                            'Task does not have any filter plugins to accept entries. '
                            'You need at least one to accept the entries you want.'
                        )
                    else:
                        log.warning(
                            'Task doesn\'t have any %s plugins, you should add (at least) one!'
                            % phase)

        for plugin in self.plugins(phase):
            # Abort this phase if one of the plugins disables it
            if phase in self.disabled_phases:
                return
            # store execute info, except during entry events
            self.current_phase = phase
            self.current_plugin = plugin.name

            if plugin.api_ver == 1:
                # backwards compatibility:
                # pass the method only the task (old behaviour)
                args = (self, )
            else:
                # pass the method the task and a copy of its config (so the plugin cannot modify it)
                args = (self, copy.copy(self.config.get(plugin.name)))

            # Hack to make task.session only active for a single plugin
            with Session() as session:
                self.session = session
                try:
                    fire_event('task.execute.before_plugin', self, plugin.name)
                    response = self.__run_plugin(plugin, phase, args)
                    if phase == 'input' and response:
                        # add entries returned by input to self.all_entries
                        for e in response:
                            e.task = self
                        self.all_entries.extend(response)
                finally:
                    fire_event('task.execute.after_plugin', self, plugin.name)
                self.session = None
Example #32
    def execute_command(self, options):
        """
        Handles the 'execute' CLI command.

        If there is already a task queue running in this process, adds the execution to the queue.
        If FlexGet is being invoked with this command, starts up a task queue and runs the execution.

        Fires events:

        * manager.execute.started
        * manager.execute.completed

        :param options: argparse options
        """
        fire_event('manager.execute.started', self, options)
        if self.task_queue.is_alive():
            if len(self.task_queue):
                log.verbose('There is a task already running, execution queued.')
            finished_events = self.execute(options, output=logger.get_capture_stream(),
                                           loglevel=logger.get_capture_loglevel())
            if not options.cron:
                # Wait until execution of all tasks has finished
                for task_id, task_name, event in finished_events:
                    event.wait()
        else:
            self.task_queue.start()
            self.ipc_server.start()
            self.execute(options)
            self.shutdown(finish_queue=True)
            self.task_queue.wait()
        fire_event('manager.execute.completed', self, options)
Example #33
    def delete(self, show_id, season_id, rel_id, session):
        """ Delete season release by show ID, season ID and release ID """
        try:
            db.show_by_id(show_id, session=session)
        except NoResultFound:
            raise NotFoundError('show with ID %s not found' % show_id)
        try:
            db.season_by_id(season_id, session)
        except NoResultFound:
            raise NotFoundError('season with ID %s not found' % season_id)
        try:
            release = db.season_release_by_id(rel_id, session)
        except NoResultFound:
            raise NotFoundError('release with ID %s not found' % rel_id)
        if not db.season_in_show(show_id, season_id):
            raise BadRequest('season with id %s does not belong to show %s' % (season_id, show_id))
        if not db.release_in_season(season_id, rel_id):
            raise BadRequest('release id %s does not belong to season %s' % (rel_id, season_id))
        args = delete_parser.parse_args()
        if args.get('forget'):
            fire_event('forget', release.title)

        db.delete_season_release_by_id(rel_id)
        return success_response(
            'successfully deleted release %d from season %d' % (rel_id, season_id)
        )
Example #34
    def delete(self, show_id, ep_id, session):
        """ Deletes all episode releases by show ID and episode ID """
        try:
            series.show_by_id(show_id, session=session)
        except NoResultFound:
            raise NotFoundError('show with ID %s not found' % show_id)
        try:
            episode = series.episode_by_id(ep_id, session)
        except NoResultFound:
            raise NotFoundError('episode with ID %s not found' % ep_id)
        if not series.episode_in_show(show_id, ep_id):
            raise BadRequest('episode with id %s does not belong to show %s' % (ep_id, show_id))

        args = release_delete_parser.parse_args()
        downloaded = args.get('downloaded') is True if args.get('downloaded') is not None else None
        release_items = []
        for release in episode.releases:
            if downloaded and release.downloaded or downloaded is False and not release.downloaded or not downloaded:
                release_items.append(release)

        for release in release_items:
            if args.get('forget'):
                fire_event('forget', release.title)
            series.delete_release_by_id(release.id)
        return success_response('successfully deleted all releases for episode %s from show %s' % (ep_id, show_id))
Example #35
def load_plugins(extra_dirs=None):
    """
    Load plugins from the standard plugin paths.
    :param list extra_dirs: Extra directories from where plugins are loaded.
    """
    global plugins_loaded

    if not extra_dirs:
        extra_dirs = []

    # Add flexget.plugins directory (core plugins)
    extra_dirs.extend(_get_standard_plugins_path())

    start_time = time.time()
    # Import all the plugins
    _load_plugins_from_dirs(extra_dirs)
    _load_plugins_from_packages()
    # Register them
    fire_event('plugin.register')
    # Plugins should only be registered once, remove their handlers after
    remove_event_handlers('plugin.register')
    # After they have all been registered, instantiate them
    for plugin in list(plugins.values()):
        plugin.initialize()
    took = time.time() - start_time
    plugins_loaded = True
    log.debug('Plugins took %.2f seconds to load. %s plugins in registry.', took, len(plugins.keys()))
Example #36
    def shutdown(self, finish_queue=True):
        """
        Application is exiting.

        :param bool finish_queue: Should scheduler finish the task queue
        """
        # Wait for scheduler to finish
        self.scheduler.shutdown(finish_queue=finish_queue)
        try:
            self.scheduler.wait()
        except KeyboardInterrupt:
            log.debug('Not waiting for scheduler shutdown due to ctrl-c')
            # show real stack trace in debug mode
            if manager.options.debug:
                raise
            print('**** Keyboard Interrupt ****')
        fire_event('manager.shutdown', self)
        if not self.unit_test:  # don't scroll "nosetests" summary results when logging is enabled
            log.debug('Shutting down')
        self.engine.dispose()
        # remove temporary database used in test mode
        if self.options.test:
            if 'test' not in self.db_filename:
                raise Exception('trying to delete non test database?')
            if self._has_lock:
                os.remove(self.db_filename)
                log.info('Removed test database')
        if not self.unit_test:  # don't scroll "nosetests" summary results when logging is enabled
            log.debug('Shutdown completed')
Example #37
    def delete(self, show_id, ep_id, session):
        """ Deletes all episode releases by show ID and episode ID """
        try:
            show = series.show_by_id(show_id, session=session)
        except NoResultFound:
            return {'status': 'error',
                    'message': 'Show with ID %s not found' % show_id
                    }, 404
        try:
            episode = series.episode_by_id(ep_id, session)
        except NoResultFound:
            return {'status': 'error',
                    'message': 'Episode with ID %s not found' % ep_id
                    }, 414
        if not series.episode_in_show(show_id, ep_id):
            return {'status': 'error',
                    'message': 'Episode with id %s does not belong to show %s' % (ep_id, show_id)}, 400

        args = release_delete_parser.parse_args()
        downloaded = args.get('downloaded') is True if args.get('downloaded') is not None else None
        release_items = []
        for release in episode.releases:
            if downloaded and release.downloaded or downloaded is False and not release.downloaded or not downloaded:
                release_items.append(release)
            if args.get('delete_seen'):
                fire_event('forget', release.title)

        for release in release_items:
            series.delete_release_by_id(release.id)
        return {}
Example #38
    def delete(self, show_id, ep_id, rel_id, session):
        ''' Delete episode release by show ID, episode ID and release ID '''
        try:
            show = series.show_by_id(show_id, session=session)
        except NoResultFound:
            return {'status': 'error',
                    'message': 'Show with ID %s not found' % show_id
                    }, 404
        try:
            episode = series.episode_by_id(ep_id, session)
        except NoResultFound:
            return {'status': 'error',
                    'message': 'Episode with ID %s not found' % ep_id
                    }, 414
        try:
            release = series.release_by_id(rel_id, session)
        except NoResultFound:
            return {'status': 'error',
                    'message': 'Release with ID %s not found' % rel_id
                    }, 424
        if not series.episode_in_show(show_id, ep_id):
            return {'status': 'error',
                    'message': 'Episode with id %s does not belong to show %s' % (ep_id, show_id)}, 400
        if not series.release_in_episode(ep_id, rel_id):
            return {'status': 'error',
                    'message': 'Release with id %s does not belong to episode %s' % (rel_id, ep_id)}, 410
        args = delete_parser.parse_args()
        if args.get('delete_seen'):
            fire_event('forget', release.title)

        series.delete_release_by_id(rel_id)
        return {}
Example #39
    def execute_command(self, options):
        """
        Send execute command to daemon through IPC or perform execution
        on current process.

        Fires events:

        * manager.execute.completed

        :param options: argparse options
        """
        # If a daemon is started, send the execution to the daemon
        ipc_info = self.check_ipc_info()
        if ipc_info:
            client = IPCClient(ipc_info['port'], ipc_info['password'])
            client.execute(dict(options))
            self.shutdown()
            return
        # Otherwise we run the execution ourselves
        with self.acquire_lock():
            fire_event('manager.execute.started', self)
            self.scheduler.start(run_schedules=False)
            self.scheduler.execute(options)
            self.scheduler.shutdown(finish_queue=True)
            try:
                self.scheduler.wait()
            except KeyboardInterrupt:
                log.error('Got ctrl-c exiting after this task completes. Press ctrl-c again to abort this task.')
            else:
                fire_event('manager.execute.completed', self)
            self.shutdown(finish_queue=False)
Example #40
    def validate_config(self):
        """
        Check all root level keywords are valid.

        :returns: A list of `ValidationError`s
        """
        fire_event('manager.before_config_validate', self)
        return config_schema.process_config(self.config)
Example #41
 def run_daemon(tray_icon: 'TrayIcon' = None):
     fire_event('manager.daemon.started', self)
     self.task_queue.start()
     self.ipc_server.start()
     self.task_queue.wait()
     fire_event('manager.daemon.completed', self)
     if tray_icon:
         tray_icon.stop()
Example #42
    def validate_config(self):
        """
        Check all root level keywords are valid.

        :returns: A list of `ValidationError`s
        """
        fire_event('manager.before_config_validate', self)
        return config_schema.process_config(self.config)
Example #43
def get_schema():
    global _root_config_schema
    if _root_config_schema is None:
        _root_config_schema = {"type": "object", "properties": {}, "additionalProperties": False}
        fire_event("config.register")
        # TODO: Is /schema/root the best place for this?
        register_schema("/schema/config", _root_config_schema)
    return _root_config_schema
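
Handlers for 'config.register' add their root-level keys to the schema being assembled here. A hedged sketch, assuming register_config_key from flexget.config_schema; the key name and schema are placeholders:

from flexget.config_schema import register_config_key
from flexget.event import event

example_schema = {'type': 'object', 'additionalProperties': {'type': 'string'}}


@event('config.register')
def register_config():
    # Runs while get_schema() is building _root_config_schema; adds one root-level key.
    register_config_key('my_example_key', example_schema)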
Example #44
def get_schema():
    global _root_config_schema
    if _root_config_schema is None:
        _root_config_schema = {'type': 'object', 'properties': {}, 'additionalProperties': False}
        fire_event('config.register')
        # TODO: Is /schema/root the best place for this?
        register_schema('/schema/config', _root_config_schema)
    return _root_config_schema
Example #45
    def execute(self, tasks=None, disable_phases=None, entries=None):
        """
        Iterate through tasks and run them. If --learn is used, download and output
        phases are disabled.

        :param list tasks: Optional list of task names to run, all tasks otherwise.
        :param list disable_phases: Optional list of phases to disable
        :param list entries: Optional list of entries to pass into task(s).
            This will also cause task to disable input phase.
        """
        # Make a list of Task instances to execute
        if tasks is None:
            # Default to all tasks if none are specified
            run_tasks = self.tasks.values()
        else:
            # Turn the list of task names or instances into a list of instances
            run_tasks = []
            for task in tasks:
                if isinstance(task, basestring):
                    if task in self.tasks:
                        run_tasks.append(self.tasks[task])
                    else:
                        log.error('Task `%s` does not exist.' % task)
                else:
                    run_tasks.append(task)

        if not run_tasks:
            log.warning('There are no tasks to execute, please add some tasks')
            return

        disable_phases = disable_phases or []
        # when learning, skip a few phases
        if self.options.learn:
            log.info('Disabling download and output phases because of %s' %
                     ('--reset' if self.options.reset else '--learn'))
            disable_phases.extend(['download', 'output'])

        fire_event('manager.execute.started', self)
        self.process_start(tasks=run_tasks)

        for task in sorted(run_tasks):
            if not task.enabled or task._abort:
                continue
            try:
                task.execute(disable_phases=disable_phases, entries=entries)
            except Exception as e:
                task.enabled = False
                log.exception('Task %s: %s' % (task.name, e))
            except KeyboardInterrupt:
                # show real stack trace in debug mode
                if self.options.debug:
                    raise
                print('**** Keyboard Interrupt ****')
                return

        self.process_end(tasks=run_tasks)
        fire_event('manager.execute.completed', self)
Example #46
    def test_variable_from_db(self, execute_task, manager):
        with Session() as session:
            s = Variables(variables={'test_variable_db': True})
            session.add(s)

        fire_event('manager.before_config_validate', manager.config, manager)

        task = execute_task('test_variable_from_db')
        assert len(task.accepted) == 1
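
The test fires 'manager.before_config_validate' with the config and the manager, which matches plugin-side handlers that may return a rewritten config (this is how the variables and secrets plugins substitute values loaded from the database). A hedged handler sketch; the injected key is illustrative only:

from flexget.event import event


@event('manager.before_config_validate')
def inject_defaults(config, manager):
    # Receives the raw config dict and the Manager before validation runs.
    # Returning the (possibly modified) config is how plugins such as
    # variables and secrets hand their substitutions back.
    config.setdefault('templates', {})
    return config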
Example #47
    def test_secret_from_db(self, execute_task, manager):
        with Session() as session:
            s = Secrets(secrets={"test_secret_db": True})
            session.add(s)

        fire_event("manager.before_config_validate", manager.config, manager)

        task = execute_task("test_secret_from_db")
        assert len(task.accepted) == 1
Example #48
    def test_variable_from_db(self, execute_task, manager):
        with Session() as session:
            s = Variables(variables={'test_variable_db': True})
            session.add(s)

        fire_event('manager.before_config_validate', manager.config, manager)

        task = execute_task('test_variable_from_db')
        assert len(task.accepted) == 1
Example #49
    def execute(self, tasks=None, disable_phases=None, entries=None):
        """
        Iterate through tasks and run them. If --learn is used, download and output
        phases are disabled.

        :param list tasks: Optional list of task names to run, all tasks otherwise.
        :param list disable_phases: Optional list of phases to disable
        :param list entries: Optional list of entries to pass into task(s).
            This will also cause task to disable input phase.
        """
        # Make a list of Task instances to execute
        if tasks is None:
            # Default to all tasks if none are specified
            run_tasks = self.tasks.values()
        else:
            # Turn the list of task names or instances into a list of instances
            run_tasks = []
            for task in tasks:
                if isinstance(task, basestring):
                    if task in self.tasks:
                        run_tasks.append(self.tasks[task])
                    else:
                        log.error('Task `%s` does not exist.' % task)
                else:
                    run_tasks.append(task)

        if not run_tasks:
            log.warning('There are no tasks to execute, please add some tasks')
            return

        disable_phases = disable_phases or []
        # when learning, skip a few phases
        if self.options.learn:
            log.info('Disabling download and output phases because of %s' %
                     ('--reset' if self.options.reset else '--learn'))
            disable_phases.extend(['download', 'output'])

        fire_event('manager.execute.started', self)
        self.process_start(tasks=run_tasks)

        for task in sorted(run_tasks):
            if not task.enabled or task._abort:
                continue
            try:
                task.execute(disable_phases=disable_phases, entries=entries)
            except Exception as e:
                task.enabled = False
                log.exception('Task %s: %s' % (task.name, e))
            except KeyboardInterrupt:
                # show real stack trace in debug mode
                if self.options.debug:
                    raise
                print('**** Keyboard Interrupt ****')
                return

        self.process_end(tasks=run_tasks)
        fire_event('manager.execute.completed', self)
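A short usage sketch for the method above; the task names, phases, and entry list are illustrative:

# run two named tasks, skipping their output phase
manager.execute(tasks=['tv', 'movies'], disable_phases=['output'])

# run all enabled tasks with pre-built entries (per the docstring, this also
# disables the input phase)
manager.execute(entries=my_entries)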
Example #50
0
def get_parser(command=None):
    global core_parser
    if not core_parser:
        core_parser = CoreArgumentParser()
        # Add all plugin options to the parser
        fire_event('options.register')
    if command:
        return core_parser.get_subparser(command)
    return core_parser
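Assuming CoreArgumentParser exposes the usual argparse interface, a hedged usage sketch:

# build the parser once; plugins attach their options via the 'options.register' event
parser = get_parser()
options = parser.parse_args(['execute', '--learn'])

# or fetch only the subparser for a single command
execute_parser = get_parser('execute')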
Example #52
0
    def shutdown(self, finish_queue=True):
        """
        Request manager shutdown.

        :param bool finish_queue: Should scheduler finish the task queue
        """
        if not self.initialized:
            raise RuntimeError('Cannot shutdown manager that was never initialized.')
        fire_event('manager.shutdown_requested', self)
        self.task_queue.shutdown(finish_queue)
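Usage of the method above is straightforward; the flag decides whether queued tasks are allowed to finish:

# let the task queue drain before stopping
manager.shutdown()

# stop immediately, discarding queued tasks
manager.shutdown(finish_queue=False)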
Example #54
0
    def __run_task_phase(self, phase):
        """Executes task phase, ie. call all enabled plugins on the task.

        Fires events:

        * task.execute.before_plugin
        * task.execute.after_plugin

        :param string phase: Name of the phase
        """
        if phase not in task_phases + [
                'abort', 'process_start', 'process_end'
        ]:
            raise Exception('%s is not a valid task phase' % phase)
        # warn if no inputs, filters or outputs in the task
        if phase in ['input', 'filter', 'output']:
            if not self.manager.unit_test:
                # Check that there is at least one manually configured plugin for these phases
                for p in self.plugins(phase):
                    if not p.builtin:
                        break
                else:
                    log.warning(
                        'Task doesn\'t have any %s plugins, you should add (at least) one!'
                        % phase)

        for plugin in self.plugins(phase):
            # Abort this phase if one of the plugins disables it
            if phase in self.disabled_phases:
                return
            # store execute info, except during entry events
            self.current_phase = phase
            self.current_plugin = plugin.name

            if plugin.api_ver == 1:
                # backwards compatibility
                # pass method only task (old behaviour)
                args = (self, )
            else:
                # pass method task, copy of config (so plugin cannot modify it)
                args = (self, copy.copy(self.config.get(plugin.name)))

            try:
                fire_event('task.execute.before_plugin', self, plugin.name)
                response = self.__run_plugin(plugin, phase, args)
                if phase == 'input' and response:
                    # add entries returned by input to self.entries
                    self.all_entries.extend(response)
            finally:
                fire_event('task.execute.after_plugin', self, plugin.name)

            # Make sure we abort if any plugin sets our abort flag
            if self._abort and phase != 'abort':
                return
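The pair of events fired around __run_plugin makes it possible to observe plugin execution from outside the task. A minimal sketch, assuming FlexGet's event decorator from flexget.event; the handler names and the timing dictionary are illustrative:

import logging
import time

from flexget.event import event

log = logging.getLogger('plugin_timer')
_plugin_started = {}


@event('task.execute.before_plugin')
def note_start(task, plugin_name):
    _plugin_started[(task.name, plugin_name)] = time.time()


@event('task.execute.after_plugin')
def report_duration(task, plugin_name):
    started = _plugin_started.pop((task.name, plugin_name), None)
    if started is not None:
        log.debug('%s took %.3fs in task %s' % (plugin_name, time.time() - started, task.name))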
Example #55
0
    def initialize(self):
        """Separated from __init__ so that unit tests can modify options before loading config."""
        self.setup_yaml()
        self.find_config(create=(self.options.cli_command == 'webui'))
        self.init_sqlalchemy()
        fire_event('manager.initialize', self)
        try:
            self.load_config()
        except ValueError as e:
            log.critical('Failed to load config file: %s' % e.args[0])
            self.shutdown(finish_queue=False)
            sys.exit(1)
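As the docstring notes, options can be adjusted between constructing the manager and calling initialize(); a hedged sketch of that ordering (the tweak itself is illustrative):

# a unit test can adjust options before any config is loaded
manager.options.debug = True  # illustrative tweak
manager.initialize()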
Example #57
0
def start(mg):
    """Start WEB UI"""

    global manager
    manager = mg

    # Create sqlalchemy session for Flask usage
    global db_session
    db_session = scoped_session(
        sessionmaker(autocommit=False, autoflush=False, bind=manager.engine))
    if db_session is None:
        raise Exception('db_session is None')

    load_ui_plugins()

    # quick hack: UI plugins may register their own SQLAlchemy tables, but those were not
    # created when the manager was instantiated, so create_all must be called again here
    from flexget.manager import Base
    Base.metadata.create_all(bind=manager.engine)

    app.register_blueprint(api)
    app.register_blueprint(api_schema)
    fire_event('webui.start')

    # Start Flask
    app.secret_key = os.urandom(24)

    set_exit_handler(stop_server)

    log.info('Starting server on port %s' % manager.options.webui.port)

    if manager.options.webui.autoreload:
        # Create and destroy a socket so that any exceptions are raised before
        # we spawn a separate Python interpreter and lose this ability.
        from werkzeug.serving import run_with_reloader
        reloader_interval = 1
        extra_files = None
        test_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        test_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        test_socket.bind(
            (manager.options.webui.bind, manager.options.webui.port))
        test_socket.close()
        log.warning('Not starting scheduler, since autoreload is enabled.')
        run_with_reloader(start_server, extra_files, reloader_interval)
    else:
        # Start the scheduler
        manager.scheduler.start()
        start_server()

    log.debug('server exited')
    fire_event('webui.stop')
    manager.shutdown(finish_queue=False)
Example #58
0
def get_schema() -> JsonSchema:
    global _root_config_schema
    if _root_config_schema is None:
        _root_config_schema = {
            'type': 'object',
            'properties': {},
            'additionalProperties': False,
            '$schema': CURRENT_SCHEMA_VERSION,
        }
        fire_event('config.register')
        # TODO: Is /schema/config the best place for this?
        register_schema('/schema/config', _root_config_schema)
    return _root_config_schema
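Plugins add their top-level keys to this root schema during the 'config.register' event. A sketch under the assumption that register_config_key from flexget.config_schema accepts a key name and a JSON schema; the key and schema below are hypothetical:

from flexget.config_schema import register_config_key
from flexget.event import event

MY_KEY_SCHEMA = {'type': 'object', 'additionalProperties': True}


@event('config.register')
def register_my_key():
    # hypothetical top-level config key
    register_config_key('my_key', MY_KEY_SCHEMA)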
Example #59
0
    def daemon_command(self, options):
        """
        Fires events:

        * manager.daemon.started
        * manager.daemon.completed

        :param options: argparse options
        """
        if options.action == 'start':
            if options.daemonize:
                self.daemonize()
            with self.acquire_lock():
                try:
                    signal.signal(signal.SIGTERM, self._handle_sigterm)
                except ValueError as e:
                    # If flexget is being called from another script, e.g. a Windows service helper, and we are
                    # not the main thread, this error will occur.
                    log.debug('Error registering sigterm handler: %s' % e)
                self.ipc_server.start()
                fire_event('manager.daemon.started', self)
                self.scheduler.start()
                try:
                    self.scheduler.wait()
                except KeyboardInterrupt:
                    log.info('Got ctrl-c, shutting down.')
                    fire_event('manager.daemon.completed', self)
                    self.shutdown(finish_queue=False)
        elif options.action == 'status':
            ipc_info = self.check_ipc_info()
            if ipc_info:
                log.info('Daemon running. (PID: %s)' % ipc_info['pid'])
            else:
                log.info('No daemon appears to be running for this config.')
        elif options.action in ['stop', 'reload']:
            ipc_info = self.check_ipc_info()
            if ipc_info:
                try:
                    client = IPCClient(ipc_info['port'], ipc_info['password'])
                except ValueError as e:
                    log.error(e)
                else:
                    if options.action == 'stop':
                        client.shutdown()
                    elif options.action == 'reload':
                        client.reload()
                self.shutdown()
            else:
                log.error('There does not appear to be a daemon running.')
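A hedged invocation sketch; the Namespace stands in for parsed argparse options and carries only the attributes this method reads:

from argparse import Namespace

# start the daemon in the foreground
manager.daemon_command(Namespace(action='start', daemonize=False))

# query whether a daemon is already running for this config
manager.daemon_command(Namespace(action='status'))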