Example no. 1
    def __init__(self, remote=None):

        # load configuration data
        load_config()

        # setup, sanity checks
        self.task_dir = turbogears.config.get("basepath.rpms")

        self._setup_logging()

        # Initialize core attributes
        if remote:
            self.remote = remote.rstrip("/")
            self.proxy = xmlrpclib.ServerProxy(self.remote + '/RPC2')

        self.tasks_added = []
        self.t_downloaded = 0
        self.tasklib = TaskLibrary()
Example no. 2
    def __init__(self, remote=None):

        # load configuration data
        load_config()

        # setup, sanity checks
        self.task_dir = turbogears.config.get("basepath.rpms", "/var/www/beaker/rpms")

        self._setup_logging()

        # Initialize core attributes
        if remote:
            self.remote = remote.rstrip("/")
            self.proxy = xmlrpclib.ServerProxy(self.remote + '/RPC2')

        self.tasks_added = []
        self.t_downloaded = 0
        self.tasklib = TaskLibrary()
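
The only difference from Example no. 1 is the fallback default passed to
turbogears.config.get: without it, an unset basepath.rpms key leaves
self.task_dir as None, which only surfaces later, when the directory is
first touched. A minimal defensive sketch of that lookup (the RuntimeError
and its message are illustrative, not part of the original):

import os
import turbogears

# Fail fast if the configured task directory does not exist.
task_dir = turbogears.config.get("basepath.rpms", "/var/www/beaker/rpms")
if not task_dir or not os.path.isdir(task_dir):
    raise RuntimeError("task directory %r does not exist; "
                       "check basepath.rpms" % task_dir)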
Example no. 3
class TaskLibrarySync:

    batch_size = 100

    def __init__(self, remote=None):

        # load configuration data
        load_config()

        # setup, sanity checks
        self.task_dir = turbogears.config.get("basepath.rpms")

        self._setup_logging()

        # Initialize core attributes
        if remote:
            self.remote = remote.rstrip("/")
            self.proxy = xmlrpclib.ServerProxy(self.remote + '/RPC2')

        self.tasks_added = []
        self.t_downloaded = 0
        self.tasklib = TaskLibrary()

    def _setup_logging(self):
        formatter = logging.Formatter('%(asctime)s - %(message)s')
        stdout_handler = logging.StreamHandler(sys.stdout)
        stdout_handler.setFormatter(formatter)
        self.logger = logging.getLogger("")
        self.logger.addHandler(stdout_handler)

    def check_perms(self):
        # Check that the effective UID matches the owner of self.task_dir
        task_dir_uid = os.stat(self.task_dir).st_uid

        if os.geteuid() != task_dir_uid:
            self.logger.critical('You should run this script as user: %s' %
                                 pwd.getpwuid(task_dir_uid).pw_name)
            sys.exit(-1)

    def get_tasks(self, server):

        # if local, directly read the database
        if server == 'local':
            tasks = Task.query.filter(Task.valid == True).all()
            tasks = [task.to_dict() for task in tasks]
        else:
            tasks = self.proxy.tasks.filter({'valid': 1})

        return [task['name'] for task in tasks]

    def _get_task_xml(self, server, task):

        # if local, directly read the database
        if server == 'local':
            try:
                self.logger.debug(
                    'Getting task XML for %s from local database' % task)
                return Task.by_name(task, True).to_xml(False)
            except Exception:
                self.logger.error(
                    'Could not get task XML for %s from local Beaker DB. Continuing.'
                    % task)
                return None

        try:
            self.logger.debug('Getting task XML for %s from %s' %
                              (task, getattr(self, server)))
            return self.proxy.tasks.to_xml(task, False)
        except (xmlrpclib.Fault, xmlrpclib.ProtocolError) as e:
            # If something goes wrong with this task, for example:
            # https://bugzilla.redhat.com/show_bug.cgi?id=915549
            # we do our best to continue anyway...
            self.logger.error(
                'Could not get task XML for %s from %s. Continuing.' %
                (task, server))
            self.logger.error('Error message: %s' % e)
            return None

    def sync_tasks(self, urls_to_sync):
        """Syncs remote tasks to the local task library.

        sync_tasks() downloads tasks in batches and syncs them to the
        local task library. If the operation fails at some point, any
        batches that have already been processed will be preserved.
        """
        def write_data_from_url(task_url):
            def _write_data_from_url(f):
                siphon(urllib2.urlopen(task_url), f)
                f.flush()

            return _write_data_from_url

        urls_to_sync.sort()
        tasks_and_writes = []
        for task_url in urls_to_sync:
            task_rpm_name = os.path.split(task_url)[1]
            tasks_and_writes.append((
                task_rpm_name,
                write_data_from_url(task_url),
            ))
        # We split the processing into batches to give other processes
        # queueing for the flock a chance to run, and to limit the time
        # wasted if an error occurs part way through
        total_number_of_rpms = len(tasks_and_writes)
        rpms_synced = 0
        while rpms_synced < total_number_of_rpms:
            session.begin()
            try:
                tasks_and_writes_current_batch = \
                    tasks_and_writes[rpms_synced:rpms_synced+self.batch_size]
                self.tasklib.update_tasks(tasks_and_writes_current_batch)
            except Exception as e:
                session.rollback()
                session.close()
                self.logger.exception('Error syncing tasks. Got error %s' %
                                      (unicode(e)))
                break
            session.commit()
            self.logger.debug('Synced %s tasks' %
                              len(tasks_and_writes_current_batch))
            rpms_synced += self.batch_size
        session.close()
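
For context, a minimal driver sketch showing how the class above might be
exercised end to end. The remote URL is a placeholder, and the scheme for
building task RPM URLs is an assumption; only methods shown above are used:

# Hypothetical driver; the remote URL is a placeholder.
sync = TaskLibrarySync(remote='https://beaker.example.com')
sync.check_perms()

local_tasks = set(sync.get_tasks('local'))
remote_tasks = set(sync.get_tasks('remote'))

# Assumption: task RPMs are served under <remote>/rpms/<name>.rpm.
urls_to_sync = ['%s/rpms/%s.rpm' % (sync.remote, name)
                for name in sorted(remote_tasks - local_tasks)]
sync.sync_tasks(urls_to_sync)

Because sync_tasks() commits after every batch of batch_size RPMs, a
failure part way through still preserves the batches already committed.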
Example no. 4
class TaskLibrarySync:

    batch_size = 100

    def __init__(self, remote=None):

        # load configuration data
        load_config()

        # setup, sanity checks
        self.task_dir = turbogears.config.get("basepath.rpms", "/var/www/beaker/rpms")

        self._setup_logging()

        # Initialize core attributes
        if remote:
            self.remote = remote.rstrip("/")
            self.proxy = xmlrpclib.ServerProxy(self.remote + '/RPC2')

        self.tasks_added = []
        self.t_downloaded = 0
        self.tasklib = TaskLibrary()

    def _setup_logging(self):
        formatter = logging.Formatter('%(asctime)s - %(message)s')
        stdout_handler = logging.StreamHandler(sys.stdout)
        stdout_handler.setFormatter(formatter)
        self.logger = logging.getLogger("")
        self.logger.addHandler(stdout_handler)

    def check_perms(self):
        # Check that the effective UID matches the owner of self.task_dir
        task_dir_uid = os.stat(self.task_dir).st_uid

        if os.geteuid() != task_dir_uid:
            self.logger.critical('You should run this script as user: %s' % pwd.getpwuid(task_dir_uid).pw_name)
            sys.exit(-1)

    def get_tasks(self, server):

        # if local, directly read the database
        if server == 'local':
            tasks = Task.query.filter(Task.valid == True).all()
            tasks = [task.to_dict() for task in tasks]
        else:
            tasks = self.proxy.tasks.filter({'valid': 1})

        return [task['name'] for task in tasks]

    def _get_task_xml(self, server, task):

        # if local, directly read the database
        if server == 'local':
            try:
                self.logger.debug('Getting task XML for %s from local database' % task)
                return Task.by_name(task, True).to_xml(False)
            except Exception:
                self.logger.error('Could not get task XML for %s from local Beaker DB. Continuing.' % task)
                return None

        try:
            self.logger.debug('Getting task XML for %s from %s' % (task, getattr(self, server)))
            return self.proxy.tasks.to_xml(task, False)
        except (xmlrpclib.Fault, xmlrpclib.ProtocolError) as e:
            # If something goes wrong with this task, for example:
            # https://bugzilla.redhat.com/show_bug.cgi?id=915549
            # we do our best to continue anyway...
            self.logger.error('Could not get task XML for %s from %s. Continuing.' % (task, server))
            self.logger.error('Error message: %s' % e)
            return None

    def sync_tasks(self, urls_to_sync):
        """Syncs remote tasks to the local task library.

        sync_tasks() downloads tasks in batches and syncs them to the
        local task library. If the operation fails at some point, any
        batches that have already been processed will be preserved.
        """
        def write_data_from_url(task_url):

            def _write_data_from_url(f):
                siphon(urllib2.urlopen(task_url), f)
                f.flush()

            return _write_data_from_url

        urls_to_sync.sort()
        tasks_and_writes = []
        for task_url in urls_to_sync:
            task_rpm_name = os.path.split(task_url)[1]
            tasks_and_writes.append((task_rpm_name, write_data_from_url(task_url),))
        # We split the processing into batches to give other processes
        # queueing for the flock a chance to run, and to limit the time
        # wasted if an error occurs part way through
        total_number_of_rpms = len(tasks_and_writes)
        rpms_synced = 0
        while rpms_synced < total_number_of_rpms:
            session.begin()
            try:
                tasks_and_writes_current_batch = \
                    tasks_and_writes[rpms_synced:rpms_synced+self.batch_size]
                self.tasklib.update_tasks(tasks_and_writes_current_batch)
            except Exception as e:
                session.rollback()
                session.close()
                self.logger.exception('Error syncing tasks. Got error %s' % (unicode(e)))
                break
            session.commit()
            self.logger.debug('Synced %s tasks' % len(tasks_and_writes_current_batch))
            rpms_synced += self.batch_size
        session.close()
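
Both full examples rely on a siphon helper that is not defined in the code
shown here. A plausible sketch, assuming it simply streams one file-like
object into another in fixed-size chunks:

def siphon(src, dest, chunk_size=8192):
    # Copy everything readable from src into dest, chunk by chunk,
    # without loading the whole task RPM into memory.
    while True:
        chunk = src.read(chunk_size)
        if not chunk:
            break
        dest.write(chunk)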