Example #1
    def remove(self, id, *args, **kw):
        try:
            labcontroller = LabController.by_id(id)
            # capture the FQDN now: after session.close() in the finally
            # block the instance is detached and its attributes may no
            # longer be loadable
            fqdn = labcontroller.fqdn
            labcontroller.removed = datetime.utcnow()
            systems = System.query.filter_by(lab_controller_id=id).values(System.id)
            for system_id in systems:
                sys_activity = SystemActivity(identity.current.user, 'WEBUI', \
                    'Changed', 'lab_controller', labcontroller.fqdn,
                    None, system_id=system_id[0])
            system_table.update().where(system_table.c.lab_controller_id == id).\
                values(lab_controller_id=None).execute()
            watchdogs = Watchdog.by_status(labcontroller=labcontroller, 
                status='active')
            for w in watchdogs:
                w.recipe.recipeset.job.cancel(msg='LabController %s has been deleted' % labcontroller.fqdn)
            for lca in labcontroller._distro_trees:
                lca.distro_tree.activity.append(DistroTreeActivity(
                        user=identity.current.user, service=u'WEBUI',
                        action=u'Removed', field_name=u'lab_controller_assocs',
                        old_value=u'%s %s' % (lca.lab_controller, lca.url),
                        new_value=None))
                session.delete(lca)
            labcontroller.disabled = True
            LabControllerActivity(identity.current.user, 'WEBUI', 
                'Changed', 'Disabled', unicode(False), unicode(True), 
                lab_controller_id=id)
            LabControllerActivity(identity.current.user, 'WEBUI', 
                'Changed', 'Removed', unicode(False), unicode(True), 
                lab_controller_id=id)
            session.commit()
        finally:
            session.close()

        flash( _(u"%s removed") % fqdn )
        raise redirect(".")
Example #2
def provision_scheduled_recipesets(*args):
    """
    if All recipes in a recipeSet are in Scheduled state then move them to
     Running.
    """
    recipesets = RecipeSet.query.join(RecipeSet.job)\
            .filter(and_(Job.dirty_version == Job.clean_version, Job.deleted == None))\
            .filter(not_(RecipeSet.recipes.any(
                Recipe.status != TaskStatus.scheduled)))
    if not recipesets.count():
        return False
    log.debug("Entering provision_scheduled_recipesets")
    for rs_id, in recipesets.values(RecipeSet.id):
        log.info("scheduled_recipesets: RS:%s" % rs_id)
        session.begin()
        try:
            provision_scheduled_recipeset(rs_id)
            session.commit()
        except exceptions.Exception:
            log.exception('Error in provision_scheduled_recipeset(%s)', rs_id)
            session.rollback()
        finally:
            session.close()
    log.debug("Exiting provision_scheduled_recipesets")
    return True
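
Nearly every scheduler function in these examples follows the same unit-of-work shape: snapshot the IDs, then open a fresh transaction per item, commit on success, roll back on failure, and always close the session so the next iteration starts clean. A minimal sketch of that pattern, assuming a SQLAlchemy scoped session named session and a hypothetical per-item function process_item:

    def process_all(item_ids):
        for item_id in item_ids:
            session.begin()
            try:
                process_item(item_id)   # hypothetical per-item work
                session.commit()
            except Exception:
                log.exception('Error processing %s', item_id)
                session.rollback()
            finally:
                # always close so the next iteration gets a fresh session
                session.close()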
Example #4
 def test_uploading_job_with_invalid_hostRequires_raises_exception(self):
     session.begin()
     try:
         xmljob = XmlJob(xmltramp.parse('''
             <job>
                 <whiteboard>job with invalid hostRequires</whiteboard>
                 <recipeSet>
                     <recipe>
                         <distroRequires>
                             <distro_name op="=" value="BlueShoeLinux5-5" />
                         </distroRequires>
                         <hostRequires>
                             <memory op=">=" value="500MB" />
                         </hostRequires>
                         <task name="/distribution/install" role="STANDALONE">
                             <params/>
                         </task>
                     </recipe>
                 </recipeSet>
             </job>
             '''))
         self.assertRaises(BX, lambda: self.controller.process_xmljob(xmljob, self.user))
     finally:
         session.rollback()
         session.close()
Example #5
    def update_db(self):

        self.logger.info('Updating local Beaker database..')

        for task_rpm in self.tasks_added:

            self.logger.debug('Adding %s' % task_rpm)

            with open(os.path.join(self.task_dir,task_rpm)) as f:
                try:
                    session.begin()
                    task = self.tasks.process_taskinfo(self.tasks.read_taskinfo(f))
                    old_rpm = task.rpm
                    task.rpm = task_rpm
                    session.commit()

                except Exception:
                    session.rollback()
                    session.close()
                    self.logger.critical('Error adding task %s' % task_rpm)
                    unlink_ignore(task_rpm)

                else:                    
                    session.close()
                    self.logger.debug('Successfully added %s' % task.rpm)
                    if old_rpm:
                        unlink_ignore(os.path.join(self.task_dir, old_rpm))

        # Update task repo
        self.logger.info('Creating repodata..')
        Task.update_repo()

        return
Example #6
 def _cleanup_session(self):
     # We clear __dict__ as a kind of hack, to try and drop references to 
     # ORM instances which the test has stored as attributes on itself 
     # (TestCase instances are kept for the life of the test runner!)
     for name in self.__dict__.keys():
         if not name.startswith('_'):
             del self.__dict__[name]
     session.close()
Example #8
File: wsgi.py Project: ustbgaofan/beaker
def close_session(exception=None):
    try:
        if session.is_active:
            log.warn('Session active when tearing down app context, rolling back')
            session.rollback()
        session.close()
    except Exception, e:
        # log and suppress
        log.exception('Error closing session when tearing down app context')
Example #9
def run_data_migrations():
    migration = _outstanding_data_migrations[0]
    log.debug('Performing online data migration %s (one batch)', migration.name)
    finished = migration.migrate_one_batch(get_engine())
    if finished:
        log.debug('Marking online data migration %s as finished', migration.name)
        with session.begin():
            migration.mark_as_finished()
        session.close()
        _outstanding_data_migrations.pop(0)
    return True
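
This example uses session.begin() as a context manager rather than explicit commit/rollback calls. In SQLAlchemy this form commits when the block exits normally and rolls back if the block raises, so only the close() remains to be done by hand, a sketch:

    with session.begin():   # commits on normal exit, rolls back on exception
        migration.mark_as_finished()
    session.close()         # the session itself still has to be closed explicitly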
Example #10
File: wsgi.py Project: ShaolongHu/beaker
def close_session(exception=None):
    try:
        session.close()
    except Exception, e:
        # If session.close() fails the application is now totally useless: 
        # the next request will fail to start a new transaction because we have 
        # not closed the old one here. So we kill the entire process with 
        # SIGABRT. The application container should spawn a fresh worker to 
        # replace this one.
        log.exception('Error closing session when tearing down app context, aborting')
        os.abort()
Example #14
File: wsgi.py Project: joyxu/beaker
def close_session(exception=None):
    try:
        session.close()
    except Exception, e:
        # If session.close() fails the application is now totally useless:
        # the next request will fail to start a new transaction because we have
        # not closed the old one here. So we kill the entire process with
        # SIGABRT. The application container should spawn a fresh worker to
        # replace this one.
        # Note that the most likely failure here is MemoryError, so we must not
        # do any other work before we abort (not even logging a message)
        # because that could try to allocate, which will just fail again with
        # MemoryError.
        os.abort()
Example #15
 def __updateConnInst(self):
     """Update the current connector instance with current date/time to avoid duplicate task while requeueing
     """
     try:
         session.begin()
         connector_instance = model.ConnectorInstance.query().get(self.task.connector_instance_id)
         instance_data = json.loads(connector_instance.instance_data)
         instance_data['uri'] = 'http://trigger.datasift.com/?' + datetime.now().strftime('%Y%m%d%H%M%S%f')           
         connector_instance.instance_data = json.dumps(instance_data)
         session.save_or_update(connector_instance)
         session.commit()
         session.close()
     except:
         log.exception(traceback.format_exc())
         log.exception(self.log_msg('exception while updating twitter streaming random URL'))
         session.rollback()
         session.close()
Example #16
    def sync_tasks(self, urls_to_sync):
        """Syncs remote tasks to the local task library.

        sync_tasks() downloads tasks in batches and syncs
        them to the local task library. If the operation fails at some point
        any batches that have already been processed will be preserved.
        """
        def write_data_from_url(task_url):
            def _write_data_from_url(f):
                siphon(urllib2.urlopen(task_url), f)
                f.flush()

            return _write_data_from_url

        urls_to_sync.sort()
        tasks_and_writes = []
        for task_url in urls_to_sync:
            task_rpm_name = os.path.split(task_url)[1]
            tasks_and_writes.append((
                task_rpm_name,
                write_data_from_url(task_url),
            ))
        # We section the batch processing up to allow other processes
        # that may be queueing for the flock to have access, and to limit
        # wastage of time if an error occurs
        total_number_of_rpms = len(tasks_and_writes)
        rpms_synced = 0
        while rpms_synced < total_number_of_rpms:
            session.begin()
            try:
                tasks_and_writes_current_batch = \
                    tasks_and_writes[rpms_synced:rpms_synced+self.batch_size]
                self.tasklib.update_tasks(tasks_and_writes_current_batch)
            except Exception, e:
                session.rollback()
                session.close()
                self.logger.exception('Error syncing tasks. Got error %s' %
                                      (unicode(e)))
                break
            session.commit()
            self.logger.debug('Synced %s tasks' %
                              len(tasks_and_writes_current_batch))
            rpms_synced += self.batch_size
Example #17
def abort_dead_recipes(*args):
    work_done = False
    with session.begin():
        filters = [not_(DistroTree.lab_controller_assocs.any())]
        if _virt_enabled():
            filters.append(
                and_(not_(Recipe.systems.any()),
                     Recipe.virt_status != RecipeVirtStatus.possible))
        else:
            filters.append(not_(Recipe.systems.any()))

        # Following query is looking for recipes stuck in Queued state.
        # This may be caused by no longer valid distribution in Lab Controller
        # or no machines available.
        # However, we have to account that custom distribution can be
        # used and this distribution is not stored in Database at all.
        recipes = MachineRecipe.query\
                .join(MachineRecipe.recipeset).join(RecipeSet.job)\
                .filter(not_(Job.is_dirty))\
                .outerjoin(Recipe.distro_tree)\
                .outerjoin(Recipe.systems) \
                .filter(Recipe.status == TaskStatus.queued)\
                .filter(or_(DistroTree.id.isnot(None), System.status == SystemStatus.broken)) \
                .filter(or_(*filters))
        recipe_ids = [
            recipe_id for recipe_id, in recipes.values(MachineRecipe.id)
        ]
    if recipe_ids:
        log.debug('Aborting dead recipes [%s ... %s] (%d total)',
                  recipe_ids[0], recipe_ids[-1], len(recipe_ids))
    for recipe_id in recipe_ids:
        session.begin()
        try:
            abort_dead_recipe(recipe_id)
            session.commit()
        except exceptions.Exception as e:
            log.exception('Error in abort_dead_recipe(%s)', recipe_id)
            session.rollback()
        finally:
            session.close()
        work_done = True
    return work_done
Example #18
 def setUp(self):
     # Create tables
     metadata.drop_all()
     metadata.create_all()
     user1 = User() 
     user1.user_name='bobvilla'
     user1.email_address='*****@*****.**'
     user1.display_name='Bob Villa'
     user1.password='******'   
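     # session.save() and session.clear() below are pre-0.5 SQLAlchemy APIs,
     # later renamed to add() and expunge_all()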
     session.save(user1)
     u2 = User()
     u2.user_name='bobathome'
     u2.email_address='*****@*****.**'
     u2.display_name='Bob Villa'
     u2.password='******'
     session.save(u2) 
     session.flush()
     print 'UuUuUuU %s' % user1
     self.user1 = user1
     session.clear()
     session.close()
Example #19
def metrics_loop(*args, **kwargs):
    # bind thread local session to reports_engine
    metrics_session = create_session(bind=get_reports_engine())
    session.registry.set(metrics_session)

    while running:
        start = time.time()
        try:
            session.begin()
            recipe_count_metrics()
            system_count_metrics()
            dirty_job_metrics()
            system_command_metrics()
        except Exception:
            log.exception('Exception in metrics loop')
        finally:
            session.close()
        end = time.time()
        duration = end - start
        if duration >= 30.0:
            log.debug("Metrics collection took %d seconds", duration)
        time.sleep(max(30.0 - duration, 5.0))
Example #20
def process_new_recipes(*args):
    work_done = False
    with session.begin():
        recipes = MachineRecipe.query\
                .join(MachineRecipe.recipeset).join(RecipeSet.job)\
                .filter(Recipe.status == TaskStatus.new)
        recipe_ids = [recipe_id for recipe_id, in recipes.values(MachineRecipe.id)]
    if recipe_ids:
        log.debug('Processing new recipes [%s ... %s] (%d total)',
                  recipe_ids[0], recipe_ids[-1], len(recipe_ids))
    for recipe_id in recipe_ids:
        session.begin()
        try:
            process_new_recipe(recipe_id)
            session.commit()
        except Exception as e:
            log.exception('Error in process_new_recipe(%s)', recipe_id)
            session.rollback()
        finally:
            session.close()
        work_done = True
    return work_done
Example #21
def update_dirty_jobs():
    work_done = False
    with session.begin():
        dirty_jobs = Job.query.filter(Job.is_dirty)
        job_ids = [job_id for job_id, in dirty_jobs.values(Job.id)]
    if job_ids:
        log.debug('Updating dirty jobs [%s ... %s] (%d total)', job_ids[0],
                  job_ids[-1], len(job_ids))
    for job_id in job_ids:
        session.begin()
        try:
            update_dirty_job(job_id)
            session.commit()
        except Exception as e:
            log.exception('Error in update_dirty_job(%s)', job_id)
            session.rollback()
        finally:
            session.close()
        work_done = True
        if event.is_set():
            break
    return work_done
Example #22
def queue_processed_recipesets(*args):
    work_done = False
    with session.begin():
        recipesets = RecipeSet.by_recipe_status(TaskStatus.processed)\
                .order_by(RecipeSet.priority.desc())\
                .order_by(RecipeSet.id)
        recipeset_ids = [rs_id for rs_id, in recipesets.values(RecipeSet.id)]
    if recipeset_ids:
        log.debug('Queuing processed recipe sets [%s ... %s] (%d total)',
                  recipeset_ids[0], recipeset_ids[-1], len(recipeset_ids))
    for rs_id in recipeset_ids:
        session.begin()
        try:
            queue_processed_recipeset(rs_id)
            session.commit()
        except Exception as e:
            log.exception('Error in queue_processed_recipeset(%s)', rs_id)
            session.rollback()
        finally:
            session.close()
        work_done = True
    return work_done
Example #23
def schedule_pending_systems():
    work_done = False
    with session.begin():
        systems = System.query\
                .join(System.lab_controller)\
                .filter(LabController.disabled == False)\
                .filter(System.scheduler_status == SystemSchedulerStatus.pending)
        system_ids = [system_id for system_id, in systems.values(System.id)]
    if system_ids:
        log.debug('Scheduling pending systems (%d total)', len(system_ids))
    for system_id in system_ids:
        session.begin()
        try:
            schedule_pending_system(system_id)
            session.commit()
        except Exception as e:
            log.exception('Error in schedule_pending_system(%s)', system_id)
            session.rollback()
        finally:
            session.close()
        work_done = True
    return work_done
Example #24
def provision_scheduled_recipesets(*args):
    """
    if All recipes in a recipeSet are in Scheduled state then move them to
     Running.
    """
    work_done = False
    recipesets = RecipeSet.query.join(RecipeSet.job)\
            .filter(not_(Job.is_deleted))\
            .filter(not_(RecipeSet.recipes.any(
                Recipe.status != TaskStatus.scheduled)))
    for rs_id, in recipesets.values(RecipeSet.id):
        session.begin()
        try:
            provision_scheduled_recipeset(rs_id)
            session.commit()
        except exceptions.Exception:
            log.exception('Error in provision_scheduled_recipeset(%s)', rs_id)
            session.rollback()
        finally:
            session.close()
        work_done = True
    return work_done
Example #26
    def sync_tasks(self, urls_to_sync):
        """Syncs remote tasks to the local task library.

        sync_tasks() downloads tasks in batches and syncs
        them to the local task library. If the operation fails at some point
        any batches that have already been processed will be preserved.
        """
        def write_data_from_url(task_url):

            def _write_data_from_url(f):
                siphon(urllib2.urlopen(task_url), f)
                f.flush()

            return _write_data_from_url
        urls_to_sync.sort()
        tasks_and_writes = []
        for task_url in urls_to_sync:
            task_rpm_name = os.path.split(task_url)[1]
            tasks_and_writes.append((task_rpm_name, write_data_from_url(task_url),))
        # We section the batch processing up to allow other processes
        # that may be queueing for the flock to have access, and to limit
        # wastage of time if an error occurs
        total_number_of_rpms = len(tasks_and_writes)
        rpms_synced = 0
        while rpms_synced < total_number_of_rpms:
            session.begin()
            try:
                tasks_and_writes_current_batch = \
                    tasks_and_writes[rpms_synced:rpms_synced+self.batch_size]
                self.tasklib.update_tasks(tasks_and_writes_current_batch)
            except Exception, e:
                session.rollback()
                session.close()
                self.logger.exception('Error syncing tasks. Got error %s' % (unicode(e)))
                break
            session.commit()
            self.logger.debug('Synced %s tasks' % len(tasks_and_writes_current_batch))
            rpms_synced += self.batch_size
Example #27
def provision_scheduled_recipesets(*args):
    """
    if All recipes in a recipeSet are in Scheduled state then move them to
     Running.
    """
    work_done = False
    with session.begin():
        recipesets = RecipeSet.by_recipe_status(TaskStatus.scheduled)
        recipeset_ids = [rs_id for rs_id, in recipesets.values(RecipeSet.id)]
    if recipeset_ids:
        log.debug('Provisioning scheduled recipe sets [%s ... %s] (%d total)',
                  recipeset_ids[0], recipeset_ids[-1], len(recipeset_ids))
    for rs_id in recipeset_ids:
        session.begin()
        try:
            provision_scheduled_recipeset(rs_id)
            session.commit()
        except exceptions.Exception:
            log.exception('Error in provision_scheduled_recipeset(%s)', rs_id)
            session.rollback()
        finally:
            session.close()
        work_done = True
    return work_done
Example #29
def log_delete(print_logs=False, dry=False, limit=None):
    if dry:
        logger.info('Dry run only')
    logger.info('Getting expired jobs')

    failed = False
    if not dry:
        requests_session = requests.Session()
        requests_session.auth = requests_kerberos.HTTPKerberosAuth(
                             mutual_authentication=requests_kerberos.OPTIONAL)
    for job, logs in Job.expired_logs(limit):
        logger.info('Deleting logs for %s', job.t_id)
        try:
            session.begin()
            for log in logs:
                if not dry:
                    if urlparse.urlparse(log).scheme:
                        response = requests_session.delete(log)
                        if response.status_code not in (200, 204, 404):
                            response.raise_for_status()
                    else:
                        try:
                            shutil.rmtree(log)
                        except OSError, e:
                            if e.errno == errno.ENOENT:
                                pass
                if print_logs:
                    print log
            if not dry:
                job.delete()
                session.commit()
                session.close()
            else:
                session.close()
        except Exception, e:
            logger.exception('Exception while deleting logs for %s', job.t_id)
            failed = True
            # session needs to be open for job.t_id in the log message above
            session.close()
            continue
Example #30
def provision_virt_recipe(recipe_id):
    log.debug('Attempting to provision dynamic virt guest for recipe %s',
              recipe_id)
    session.begin()
    try:
        recipe = Recipe.by_id(recipe_id)
        job_owner = recipe.recipeset.job.owner
        manager = dynamic_virt.VirtManager(job_owner)
        available_flavors = manager.available_flavors()
        # We want them in order of smallest to largest, so that we can pick the
        # smallest flavor that satisfies the recipe's requirements. Sorting by RAM
        # is a decent approximation.
        possible_flavors = XmlHost.from_string(recipe.host_requires)\
            .filter_openstack_flavors(available_flavors, manager.lab_controller)
        if not possible_flavors:
            log.info(
                'No OpenStack flavors matched recipe %s, marking precluded',
                recipe.id)
            recipe.virt_status = RecipeVirtStatus.precluded
            return
        # cheapest flavor has the smallest disk and ram
        # id guarantees consistency of our results
        flavor = min(possible_flavors,
                     key=lambda flavor: (flavor.ram, flavor.disk, flavor.id))
        vm_name = '%srecipe-%s' % (ConfigItem.by_name(
            u'guest_name_prefix').current_value(u'beaker-'), recipe.id)
        log.debug('Creating VM named %s as flavor %s', vm_name, flavor)
        vm = manager.create_vm(vm_name, flavor)
        vm.instance_created = datetime.utcnow()
        try:
            recipe.createRepo()
            recipe.clear_candidate_systems()
            recipe.watchdog = Watchdog()
            recipe.resource = vm
            recipe.recipeset.lab_controller = manager.lab_controller
            recipe.virt_status = RecipeVirtStatus.succeeded
            recipe.schedule()
            log.info(
                "recipe ID %s moved from Queued to Scheduled by provision_virt_recipe",
                recipe.id)
            recipe.waiting()
            recipe.provision()
            log.info(
                "recipe ID %s moved from Scheduled to Waiting by provision_virt_recipe",
                recipe.id)
        except:
            exc_type, exc_value, exc_tb = sys.exc_info()
            try:
                manager.destroy_vm(vm)
            except Exception:
                log.exception(
                    'Failed to clean up VM %s during provision_virt_recipe, leaked!',
                    vm.instance_id)
                # suppress this exception so the original one is not masked
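            # Python 2 three-argument raise: re-raises the original
            # exception with its original traceback intact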
            raise exc_type, exc_value, exc_tb
        session.commit()
    except Exception as e:
        log.exception('Error in provision_virt_recipe(%s)', recipe_id)
        session.rollback()
        # As an added precaution, let's try and avoid this recipe in future
        with session.begin():
            recipe = Recipe.by_id(recipe_id)
            recipe.virt_status = RecipeVirtStatus.failed
    finally:
        session.close()
Example #31
def log_delete(print_logs=False, dry=False, limit=None):
    if dry:
        logger.info('Dry run only')
    logger.info('Getting expired jobs')

    failed = False
    if not dry:
        requests_session = requests.Session()
        log_delete_user = config.get('beaker.log_delete_user')
        log_delete_password = config.get('beaker.log_delete_password')

        available_auths = []
        available_auth_names = []

        if _kerberos_available:
            available_auths.append(
                requests_kerberos.HTTPKerberosAuth(
                    mutual_authentication=requests_kerberos.DISABLED))
            available_auth_names.append('Kerberos')

        if log_delete_user and log_delete_password:
            available_auths.append(
                requests.auth.HTTPDigestAuth(log_delete_user,
                                             log_delete_password))
            available_auth_names.append('HTTPDigestAuth')
        requests_session.auth = MultipleAuth(available_auths)
        logger.debug('Available authentication methods: %s' %
                     ', '.join(available_auth_names))

    for jobid, in Job.query.filter(Job.is_expired).limit(limit).values(Job.id):
        logger.info('Deleting logs for job %s', jobid)
        try:
            session.begin()
            job = Job.by_id(jobid)
            all_logs = job.all_logs(load_parent=False)
            # We always delete entire directories, not individual log files,
            # because that's faster, and because we never mix unrelated log
            # files together in the same directory so it's safe to do that.
            # We keep a trailing slash on the directories otherwise when we try
            # to DELETE them, Apache will first redirect us to the trailing
            # slash.
            log_dirs = (os.path.dirname(log.full_path) + '/'
                        for log in all_logs)
            for path in remove_descendants(log_dirs):
                if not dry:
                    if urlparse.urlparse(path).scheme:
                        # We need to handle redirects ourselves, since requests
                        # turns DELETE into GET on 302 which we do not want.
                        response = requests_session.delete(
                            path, allow_redirects=False)
                        redirect_limit = 10
                        while redirect_limit > 0 and response.status_code in (
                                301, 302, 303, 307):
                            response = requests_session.delete(
                                response.headers['Location'],
                                allow_redirects=False)
                            redirect_limit -= 1
                        if response.status_code not in (200, 204, 404):
                            response.raise_for_status()
                    else:
                        try:
                            shutil.rmtree(path)
                        except OSError, e:
                            if e.errno == errno.ENOENT:
                                pass
                if print_logs:
                    print path
            if not dry:
                job.delete()
                session.commit()
                session.close()
            else:
                session.close()
        except Exception, e:
            logger.exception('Exception while deleting logs for job %s', jobid)
            failed = True
            session.close()
            continue
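
The remove_descendants() helper used above is not shown in these excerpts. Judging from how it is called (directory paths that keep their trailing slash, deleted recursively), a plausible reading is that it keeps only paths whose parent directories are not also in the list; the sketch below is an assumption, not the Beaker source:

    def remove_descendants(paths):
        # Assumed behaviour: drop any path that lives inside another path
        # in the input. Sorting guarantees a parent ('/a/b/') is seen before
        # its descendants ('/a/b/c/'); the trailing slash prevents '/a/bc/'
        # from matching '/a/b/'.
        kept = []
        for path in sorted(set(paths)):
            if not any(path.startswith(parent) for parent in kept):
                kept.append(path)
        return kept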
Example #32
def log_delete(print_logs=False, dry=False, limit=None):
    if dry:
        logger.info('Dry run only')
    logger.info('Getting expired jobs')

    failed = False
    if not dry:
        requests_session = requests.Session()
        log_delete_user = config.get('beaker.log_delete_user')
        log_delete_password = config.get('beaker.log_delete_password')

        available_auths = []
        available_auth_names = []

        if _kerberos_available:
            available_auths.append(
                requests_kerberos.HTTPKerberosAuth(
                    mutual_authentication=requests_kerberos.DISABLED))
            available_auth_names.append('Kerberos')

        if log_delete_user and log_delete_password:
            available_auths.append(
                requests.auth.HTTPDigestAuth(log_delete_user,
                                             log_delete_password))
            available_auth_names.append('HTTPDigestAuth')
        requests_session.auth = MultipleAuth(available_auths)
        logger.debug('Available authentication methods: %s' %
                     ', '.join(available_auth_names))

    for job, logs in Job.expired_logs(limit):
        logger.info('Deleting logs for %s', job.t_id)
        try:
            session.begin()
            for log in logs:
                if not dry:
                    if urlparse.urlparse(log).scheme:
                        # We need to handle redirects ourselves, since requests
                        # turns DELETE into GET on 302 which we do not want.
                        response = requests_session.delete(
                            log, allow_redirects=False)
                        redirect_limit = 10
                        while redirect_limit > 0 and response.status_code in (
                                301, 302, 303, 307):
                            response = requests_session.delete(
                                response.headers['Location'],
                                allow_redirects=False)
                            redirect_limit -= 1
                        if response.status_code not in (200, 204, 404):
                            response.raise_for_status()
                    else:
                        try:
                            shutil.rmtree(log)
                        except OSError, e:
                            if e.errno == errno.ENOENT:
                                pass
                if print_logs:
                    print log
            if not dry:
                job.delete()
                session.commit()
                session.close()
            else:
                session.close()
        except Exception, e:
            logger.exception('Exception while deleting logs for %s', job.t_id)
            failed = True
            # session needs to be open for job.t_id in the log message above
            session.close()
            continue
Example #33
 def tearDown(self):
     session.rollback()
     session.close()
Example #34
class TaskLibrarySync:

    batch_size = 100

    def __init__(self, remote=None):

        # load configuration data
        load_config()

        # setup, sanity checks
        self.task_dir = turbogears.config.get("basepath.rpms")

        self._setup_logging()

        # Initialize core attributes
        if remote:
            self.remote = remote.rstrip("/")
            self.proxy = xmlrpclib.ServerProxy(self.remote + '/RPC2')

        self.tasks_added = []
        self.t_downloaded = 0
        self.tasklib = TaskLibrary()

    def _setup_logging(self):
        formatter = logging.Formatter('%(asctime)s - %(message)s')
        stdout_handler = logging.StreamHandler(sys.stdout)
        stdout_handler.setFormatter(formatter)
        self.logger = logging.getLogger("")
        self.logger.addHandler(stdout_handler)

    def check_perms(self):
        # See if the euid is the same as that of self.task_dir
        task_dir_uid = os.stat(self.task_dir).st_uid

        if os.geteuid() != task_dir_uid:
            self.logger.critical('You should run this script as user: %s' %
                                 pwd.getpwuid(task_dir_uid).pw_name)
            sys.exit(-1)

    def get_tasks(self, server):

        # if local, directly read the database
        if server == 'local':
            tasks = Task.query.filter(Task.valid == True).all()
            tasks = [task.to_dict() for task in tasks]
        else:
            tasks = self.proxy.tasks.filter({'valid': 1})

        return [task['name'] for task in tasks]

    def _get_task_xml(self, server, task):

        # if local, directly read the database
        if server == 'local':
            try:
                self.logger.debug(
                    'Getting task XML for %s from local database' % task)
                return Task.by_name(task, True).to_xml(False)
            except Exception:
                self.logger.error(
                    'Could not get task XML for %s from local Beaker DB. Continuing.'
                    % task)
                return None

        try:
            self.logger.debug('Getting task XML for %s from %s' %
                              (task, getattr(self, server)))
            return self.proxy.tasks.to_xml(task, False)
        except (xmlrpclib.Fault, xmlrpclib.ProtocolError) as e:
            # If something goes wrong with this task, for example:
            # https://bugzilla.redhat.com/show_bug.cgi?id=915549
            # we do our best to continue anyway...
            self.logger.error(
                'Could not get task XML for %s from %s. Continuing.' %
                (task, server))
            self.logger.error('Error message: %s' % e)
            return None

    def sync_tasks(self, urls_to_sync):
        """Syncs remote tasks to the local task library.

        sync_tasks() downloads tasks in batches and syncs
        them to the local task library. If the operation fails at some point
        any batches that have already been processed will be preserved.
        """
        def write_data_from_url(task_url):
            def _write_data_from_url(f):
                siphon(urllib2.urlopen(task_url), f)
                f.flush()

            return _write_data_from_url

        urls_to_sync.sort()
        tasks_and_writes = []
        for task_url in urls_to_sync:
            task_rpm_name = os.path.split(task_url)[1]
            tasks_and_writes.append((
                task_rpm_name,
                write_data_from_url(task_url),
            ))
        # We section the batch processing up to allow other processes
        # that may be queueing for the flock to have access, and to limit
        # wastage of time if an error occurs
        total_number_of_rpms = len(tasks_and_writes)
        rpms_synced = 0
        while rpms_synced < total_number_of_rpms:
            session.begin()
            try:
                tasks_and_writes_current_batch = \
                    tasks_and_writes[rpms_synced:rpms_synced+self.batch_size]
                self.tasklib.update_tasks(tasks_and_writes_current_batch)
            except Exception, e:
                session.rollback()
                session.close()
                self.logger.exception('Error syncing tasks. Got error %s' %
                                      (unicode(e)))
                break
            session.commit()
            self.logger.debug('Synced %s tasks' %
                              len(tasks_and_writes_current_batch))
            rpms_synced += self.batch_size
        session.close()
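
A hypothetical invocation of the class above; the remote URL and RPM name are illustrative, not taken from the source:

    # Sync task RPMs from a remote Beaker instance into the local library
    sync = TaskLibrarySync(remote='https://beaker.example.com')
    sync.check_perms()
    remote_task_names = sync.get_tasks('remote')
    # URLs would normally be derived from the remote task list
    sync.sync_tasks(['https://beaker.example.com/rpms/example-task-1.0-1.noarch.rpm'])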
Example #35
def populate_db(user_name=None, password=None, user_display_name=None,
                user_email_address=None):
    logger.info('Populating tables with pre-defined values if necessary')
    session.begin()

    try:
        admin = Group.by_name(u'admin')
    except InvalidRequestError:
        admin = Group(group_name=u'admin', display_name=u'Admin')
        session.add(admin)

    try:
        lab_controller = Group.by_name(u'lab_controller')
    except InvalidRequestError:
        lab_controller = Group(group_name=u'lab_controller',
                               display_name=u'Lab Controller')
        session.add(lab_controller)

    # Setup User account
    if user_name:
        user = User.lazy_create(user_name=user_name.decode('utf8'))
        if password:
            user.password = password.decode('utf8')
        if user_display_name:
            user.display_name = user_display_name.decode('utf8')
        if user_email_address:
            user.email_address = user_email_address.decode('utf8')
        # Ensure the user is in the 'admin' group as an owner.
        # Flush for lazy_create.
        session.flush()
        user_group_assoc = UserGroup.lazy_create(
            user_id=user.user_id, group_id=admin.group_id)
        user_group_assoc.is_owner = True

    # Create distro_expire perm if not present
    try:
        _ = Permission.by_name(u'distro_expire')
    except NoResultFound:
        distro_expire_perm = Permission(u'distro_expire')
        session.add(distro_expire_perm)

    # Create proxy_auth perm if not present
    try:
        _ = Permission.by_name(u'proxy_auth')
    except NoResultFound:
        proxy_auth_perm = Permission(u'proxy_auth')
        session.add(proxy_auth_perm)

    # Create tag_distro perm if not present
    try:
        _ = Permission.by_name(u'tag_distro')
    except NoResultFound:
        tag_distro_perm = Permission(u'tag_distro')
        admin.permissions.append(tag_distro_perm)

    # Create stop_task perm if not present
    try:
        _ = Permission.by_name(u'stop_task')
    except NoResultFound:
        stop_task_perm = Permission(u'stop_task')
        lab_controller.permissions.append(stop_task_perm)
        admin.permissions.append(stop_task_perm)

    # Create secret_visible perm if not present
    try:
        _ = Permission.by_name(u'secret_visible')
    except NoResultFound:
        secret_visible_perm = Permission(u'secret_visible')
        lab_controller.permissions.append(secret_visible_perm)
        admin.permissions.append(secret_visible_perm)

    # Create change_prio perm if not present
    try:
        _ = Permission.by_name(u'change_prio')
    except NoResultFound:
        change_prio_perm = Permission(u'change_prio')
        session.add(change_prio_perm)

    # Setup Hypervisors Table
    if Hypervisor.query.count() == 0:
        for h in [u'KVM', u'Xen', u'HyperV', u'VMWare']:
            session.add(Hypervisor(hypervisor=h))

    # Setup kernel_type Table
    if KernelType.query.count() == 0:
        for type in [u'default', u'highbank', u'imx', u'omap', u'tegra']:
            session.add(KernelType(kernel_type=type, uboot=False))
        for type in [u'mvebu']:
            session.add(KernelType(kernel_type=type, uboot=True))

    # Setup base Architectures
    if Arch.query.count() == 0:
        for arch in [u'i386', u'x86_64', u'ia64', u'ppc', u'ppc64', u'ppc64le',
                     u's390', u's390x', u'armhfp', u'aarch64', u'arm']:
            session.add(Arch(arch))

    # Setup base power types
    if PowerType.query.count() == 0:
        for power_type in [u'apc_snmp', u'apc_snmp_then_etherwake',
                           u'bladecenter', u'bladepap', u'drac', u'ether_wake', u'hyper-v',
                           u'ilo', u'integrity', u'ipmilan', u'ipmitool', u'lpar', u'rsa',
                           u'virsh', u'wti']:
            session.add(PowerType(power_type))

    # Setup key types
    if Key.query.count() == 0:
        session.add(Key(u'DISKSPACE', True))
        session.add(Key(u'COMMENT'))
        session.add(Key(u'CPUFAMILY', True))
        session.add(Key(u'CPUFLAGS'))
        session.add(Key(u'CPUMODEL'))
        session.add(Key(u'CPUMODELNUMBER', True))
        session.add(Key(u'CPUSPEED', True))
        session.add(Key(u'CPUVENDOR'))
        session.add(Key(u'DISK', True))
        session.add(Key(u'FORMFACTOR'))
        session.add(Key(u'HVM'))
        session.add(Key(u'MEMORY', True))
        session.add(Key(u'MODEL'))
        session.add(Key(u'MODULE'))
        session.add(Key(u'NETWORK'))
        session.add(Key(u'NR_DISKS', True))
        session.add(Key(u'NR_ETH', True))
        session.add(Key(u'NR_IB', True))
        session.add(Key(u'PCIID'))
        session.add(Key(u'PROCESSORS', True))
        session.add(Key(u'RTCERT'))
        session.add(Key(u'SCRATCH'))
        session.add(Key(u'STORAGE'))
        session.add(Key(u'USBID'))
        session.add(Key(u'VENDOR'))
        session.add(Key(u'XENCERT'))
        session.add(Key(u'NETBOOT_METHOD'))

    if RetentionTag.query.count() == 0:
        session.add(RetentionTag(tag=u'scratch', is_default=1, expire_in_days=30))
        session.add(RetentionTag(tag=u'60days', needs_product=False, expire_in_days=60))
        session.add(RetentionTag(tag=u'120days', needs_product=False, expire_in_days=120))
        session.add(RetentionTag(tag=u'active', needs_product=True))
        session.add(RetentionTag(tag=u'audit', needs_product=True))

    config_items = [
        # name, description, numeric
        (u'root_password', u'Plaintext root password for provisioned systems', False),
        (u'root_password_validity', u"Maximum number of days a user's root password is valid for",
         True),
        (u'guest_name_prefix', u'Prefix for names of dynamic guests in OpenStack', False),
        (u'guest_private_network', u'Network address in CIDR format for private networks'
                                   ' of dynamic guests in OpenStack.', False),
    ]
    for name, description, numeric in config_items:
        ConfigItem.lazy_create(name=name, description=description, numeric=numeric)
    if ConfigItem.by_name(u'root_password').current_value() is None:
        ConfigItem.by_name(u'root_password').set(u'beaker', user=admin.users[0])
    if ConfigItem.by_name(u'guest_private_network').current_value() is None:
        ConfigItem.by_name(u'guest_private_network').set(u'192.168.10.0/24',
                                                         user=admin.users[0])

    session.commit()
    session.close()
    logger.info('Pre-defined values populated')
Example #36
def log_delete(print_logs=False, dry=False, limit=None):
    if dry:
        logger.info('Dry run only')
    logger.info('Getting expired jobs')

    failed = False
    if not dry:
        requests_session = requests.Session()
        log_delete_user = config.get('beaker.log_delete_user')
        log_delete_password = config.get('beaker.log_delete_password')

        available_auths = []
        available_auth_names = []

        if _kerberos_available:
            available_auths.append(requests_kerberos.HTTPKerberosAuth(
                mutual_authentication=requests_kerberos.DISABLED))
            available_auth_names.append('Kerberos')

        if log_delete_user and log_delete_password:
            available_auths.append(requests.auth.HTTPDigestAuth(log_delete_user,
                log_delete_password))
            available_auth_names.append('HTTPDigestAuth')
        requests_session.auth = MultipleAuth(available_auths)
        logger.debug('Available authentication methods: %s' %
            ', '.join(available_auth_names))

    for job, logs in Job.expired_logs(limit):
        logger.info('Deleting logs for %s', job.t_id)
        try:
            session.begin()
            for log in logs:
                if not dry:
                    if urlparse.urlparse(log).scheme:
                        # We need to handle redirects ourselves, since requests
                        # turns DELETE into GET on 302 which we do not want.
                        response = requests_session.delete(log, allow_redirects=False)
                        redirect_limit = 10
                        while redirect_limit > 0 and response.status_code in (
                                301, 302, 303, 307):
                            response = requests_session.delete(
                                    response.headers['Location'],
                                    allow_redirects=False)
                            redirect_limit -= 1
                        if response.status_code not in (200, 204, 404):
                            response.raise_for_status()
                    else:
                        try:
                            shutil.rmtree(log)
                        except OSError, e:
                            if e.errno == errno.ENOENT:
                                pass
                if print_logs:
                    print log
            if not dry:
                job.delete()
                session.commit()
                session.close()
            else:
                session.close()
        except Exception, e:
            logger.exception('Exception while deleting logs for %s', job.t_id)
            failed = True
            # session needs to be open for job.t_id in the log message above
            session.close()
            continue
Example #37
 def tearDown(self):
     session.commit()
     session.close()
Example #38
def update_dirty_jobs():
    dirty_jobs = Job.query.filter(Job.dirty_version != Job.clean_version)
    if not dirty_jobs.count():
        return False
    log.debug("Entering update_dirty_jobs")
    for job_id, in dirty_jobs.values(Job.id):
        session.begin()
        try:
            update_dirty_job(job_id)
            session.commit()
        except Exception, e:
            log.exception('Error in update_dirty_job(%s)', job_id)
            session.rollback()
        finally:
            session.close()
        if event.is_set():
            break
    log.debug("Exiting update_dirty_jobs")
    return True

def update_dirty_job(job_id):
    log.debug('Updating dirty job %s', job_id)
    job = Job.by_id(job_id)
    job.update_status()

def process_new_recipes(*args):
    recipes = MachineRecipe.query\
            .join(MachineRecipe.recipeset).join(RecipeSet.job)\
            .filter(Job.dirty_version == Job.clean_version)\
            .filter(Recipe.status == TaskStatus.new)
Example #39
def log_delete(print_logs=False, dry=False, limit=None):
    if dry:
        logger.info('Dry run only')

    failed = False
    if not dry:
        requests_session = requests.Session()
        log_delete_user = config.get('beaker.log_delete_user')
        log_delete_password = config.get('beaker.log_delete_password')

        available_auths = []
        available_auth_names = []

        if _kerberos_available:
            available_auths.append(requests_kerberos.HTTPKerberosAuth(
                mutual_authentication=requests_kerberos.DISABLED))
            available_auth_names.append('Kerberos')

        if log_delete_user and log_delete_password:
            available_auths.append(requests.auth.HTTPDigestAuth(log_delete_user,
                log_delete_password))
            available_auth_names.append('HTTPDigestAuth')
        requests_session.auth = MultipleAuth(available_auths)
        logger.debug('Available authentication methods: %s' %
            ', '.join(available_auth_names))

    logger.info('Fetching expired jobs to be deleted')
    try:
        session.begin()
        for job in Job.query.filter(Job.is_expired).limit(limit):
            logger.info('Deleting expired job %s', job.id)
            job.deleted = datetime.datetime.utcnow()
        if not dry:
            session.commit()
        else:
            session.rollback()
        session.close()
    except Exception as e:
        logger.exception('Exception while deleting expired jobs')
        failed = True
        session.close()

    logger.info('Fetching deleted jobs to be purged')
    with session.begin():
        jobs = Job.query.filter(and_(Job.is_deleted, Job.purged == None)).limit(limit)
        job_ids = [job_id for job_id, in jobs.values(Job.id)]
    for jobid in job_ids:
        logger.info('Purging logs for deleted job %s', jobid)
        try:
            session.begin()
            job = Job.by_id(jobid)
            all_logs = job.all_logs(load_parent=False)
            # We always delete entire directories, not individual log files, 
            # because that's faster, and because we never mix unrelated log 
            # files together in the same directory so it's safe to do that.
            # We keep a trailing slash on the directories otherwise when we try 
            # to DELETE them, Apache will first redirect us to the trailing 
            # slash.
            log_dirs = (os.path.dirname(log.full_path) + '/' for log in all_logs)
            for path in remove_descendants(log_dirs):
                if not dry:
                    if urlparse.urlparse(path).scheme:
                        # We need to handle redirects ourselves, since requests
                        # turns DELETE into GET on 302 which we do not want.
                        response = requests_session.delete(path, allow_redirects=False)
                        redirect_limit = 10
                        while redirect_limit > 0 and response.status_code in (
                                301, 302, 303, 307):
                            response = requests_session.delete(
                                    response.headers['Location'],
                                    allow_redirects=False)
                            redirect_limit -= 1
                        if response.status_code not in (200, 204, 404):
                            response.raise_for_status()
                    else:
                        try:
                            shutil.rmtree(path)
                        except OSError, e:
                            if e.errno == errno.ENOENT:
                                pass
                if print_logs:
                    print path
            job.purge()
            if not dry:
                session.commit()
            else:
                session.rollback()
            session.close()
        except Exception, e:
            logger.exception('Exception while purging logs for job %s', jobid)
            failed = True
            session.close()
            continue
Example #40
File: init.py Project: ShaolongHu/beaker
def populate_db(user_name=None, password=None, user_display_name=None,
        user_email_address=None):
    session.begin()

    try:
        admin = Group.by_name(u'admin')
    except InvalidRequestError:
        admin = Group(group_name=u'admin', display_name=u'Admin')
        session.add(admin)

    try:
        lab_controller = Group.by_name(u'lab_controller')
    except InvalidRequestError:
        lab_controller = Group(group_name=u'lab_controller',
                               display_name=u'Lab Controller')
        session.add(lab_controller)

    #Setup User account
    if user_name:
        user = User.lazy_create(user_name=user_name.decode('utf8'))
        if password:
            user.password = password.decode('utf8')
        if user_display_name:
            user.display_name = user_display_name.decode('utf8')
        if user_email_address:
            user.email_address = user_email_address.decode('utf8')
        # Ensure the user is in the 'admin' group as an owner.
        # Flush for lazy_create.
        session.flush()
        user_group_assoc = UserGroup.lazy_create(
                user_id=user.user_id, group_id=admin.group_id)
        user_group_assoc.is_owner = True

    # Create distro_expire perm if not present
    try:
        distro_expire_perm = Permission.by_name(u'distro_expire')
    except NoResultFound:
        distro_expire_perm = Permission(u'distro_expire')
        session.add(distro_expire_perm)

    # Create proxy_auth perm if not present
    try:
        proxy_auth_perm = Permission.by_name(u'proxy_auth')
    except NoResultFound:
        proxy_auth_perm = Permission(u'proxy_auth')
        session.add(proxy_auth_perm)

    # Create tag_distro perm if not present
    try:
        tag_distro_perm = Permission.by_name(u'tag_distro')
    except NoResultFound:
        tag_distro_perm = Permission(u'tag_distro')
        admin.permissions.append(tag_distro_perm)

    # Create stop_task perm if not present
    try:
        stop_task_perm = Permission.by_name(u'stop_task')
    except NoResultFound:
        stop_task_perm = Permission(u'stop_task')
        lab_controller.permissions.append(stop_task_perm)
        admin.permissions.append(stop_task_perm)

    # Create secret_visible perm if not present
    try:
        secret_visible_perm = Permission.by_name(u'secret_visible')
    except NoResultFound:
        secret_visible_perm = Permission(u'secret_visible')
        lab_controller.permissions.append(secret_visible_perm)
        admin.permissions.append(secret_visible_perm)

    #Setup Hypervisors Table
    if Hypervisor.query.count() == 0:
        for h in [u'KVM', u'Xen', u'HyperV', u'VMWare']:
            session.add(Hypervisor(hypervisor=h))

    #Setup kernel_type Table
    if KernelType.query.count() == 0:
        for kt in [u'default', u'highbank', u'imx', u'omap', u'tegra']:
            session.add(KernelType(kernel_type=kt, uboot=False))
        for kt in [u'mvebu']:
            session.add(KernelType(kernel_type=kt, uboot=True))

    #Setup base Architectures
    if Arch.query.count() == 0:
        for arch in [u'i386', u'x86_64', u'ia64', u'ppc', u'ppc64', u'ppc64le',
                     u's390', u's390x', u'armhfp', u'aarch64', u'arm']:
            session.add(Arch(arch))

    #Setup base power types
    if PowerType.query.count() == 0:
        for power_type in [u'apc_snmp', u'apc_snmp_then_etherwake',
                u'bladecenter', u'bladepap', u'drac', u'ether_wake', u'hyper-v',
                u'ilo', u'integrity', u'ipmilan', u'ipmitool', u'lpar', u'rsa',
                u'virsh', u'wti']:
            session.add(PowerType(power_type))

    #Setup key types
    if Key.query.count() == 0:
        session.add(Key(u'DISKSPACE',True))
        session.add(Key(u'COMMENT'))
        session.add(Key(u'CPUFAMILY',True))
        session.add(Key(u'CPUFLAGS'))
        session.add(Key(u'CPUMODEL'))
        session.add(Key(u'CPUMODELNUMBER', True))
        session.add(Key(u'CPUSPEED',True))
        session.add(Key(u'CPUVENDOR'))
        session.add(Key(u'DISK',True))
        session.add(Key(u'FORMFACTOR'))
        session.add(Key(u'HVM'))
        session.add(Key(u'MEMORY',True))
        session.add(Key(u'MODEL'))
        session.add(Key(u'MODULE'))
        session.add(Key(u'NETWORK'))
        session.add(Key(u'NR_DISKS',True))
        session.add(Key(u'NR_ETH',True))
        session.add(Key(u'NR_IB',True))
        session.add(Key(u'PCIID'))
        session.add(Key(u'PROCESSORS',True))
        session.add(Key(u'RTCERT'))
        session.add(Key(u'SCRATCH'))
        session.add(Key(u'STORAGE'))
        session.add(Key(u'USBID'))
        session.add(Key(u'VENDOR'))
        session.add(Key(u'XENCERT'))
        session.add(Key(u'NETBOOT_METHOD'))

    #Setup ack/nak responses
    if Response.query.count() == 0:
        session.add(Response(response=u'ack'))
        session.add(Response(response=u'nak'))

    if RetentionTag.query.count() == 0:
        session.add(RetentionTag(tag=u'scratch', is_default=1, expire_in_days=30))
        session.add(RetentionTag(tag=u'60days', needs_product=False, expire_in_days=60))
        session.add(RetentionTag(tag=u'120days', needs_product=False, expire_in_days=120))
        session.add(RetentionTag(tag=u'active', needs_product=True))
        session.add(RetentionTag(tag=u'audit', needs_product=True))

    config_items = [
        # name, description, numeric
        (u'root_password', u'Plaintext root password for provisioned systems', False),
        (u'root_password_validity', u"Maximum number of days a user's root password is valid for", True),
        (u'guest_name_prefix', u'Prefix for names of dynamic guests in OpenStack', False),
    ]
    for name, description, numeric in config_items:
        ConfigItem.lazy_create(name=name, description=description, numeric=numeric)
    if ConfigItem.by_name(u'root_password').current_value() is None:
        ConfigItem.by_name(u'root_password').set(u'beaker', user=admin.users[0])

    session.commit()
    session.close()
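populate_db() relies throughout on a query-first get-or-create idiom: look the row up, and create it only when the lookup raises. A generic SQLAlchemy sketch of that idiom (the get_or_create helper is hypothetical, not a Beaker API; it is racy under concurrent writers, which is acceptable for one-off database initialisation):

from sqlalchemy.orm.exc import NoResultFound

def get_or_create(session, model, **kwargs):
    # Query first; create and add only when no matching row exists.
    try:
        return session.query(model).filter_by(**kwargs).one()
    except NoResultFound:
        instance = model(**kwargs)
        session.add(instance)
        return instance

# e.g. get_or_create(session, Response, response=u'ack')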
Example #41
File: init.py Project: sibiaoluo/beaker
def init_db(user_name=None, password=None, user_display_name=None, user_email_address=None):
    get_engine()
    metadata.create_all()
    session.begin()

    try:
        admin = Group.by_name(u'admin')
    except InvalidRequestError:
        admin     = Group(group_name=u'admin',display_name=u'Admin')

    try:
        lab_controller = Group.by_name(u'lab_controller')
    except InvalidRequestError:
        lab_controller = Group(group_name=u'lab_controller',
                               display_name=u'Lab Controller')
    
    #Setup User account
    if user_name:
        if password:
            user = User(user_name=user_name.decode('utf8'), password=password.decode('utf8'))
            if user_display_name:
                user.display_name = user_display_name.decode('utf8')
            if user_email_address:
                user.email_address = user_email_address.decode('utf8')
            admin.users.append(user)
        else:
            print "Password must be provided with username"
    elif len(admin.users) == 0:
        print "No admin account exists, please create one with --user"
        sys.exit(1)

    # Create distro_expire perm if not present
    try:
        distro_expire_perm = Permission.by_name(u'distro_expire')
    except NoResultFound:
        distro_expire_perm = Permission(u'distro_expire')

    # Create proxy_auth perm if not present
    try:
        proxy_auth_perm = Permission.by_name(u'proxy_auth')
    except NoResultFound:
        proxy_auth_perm = Permission(u'proxy_auth')

    # Create tag_distro perm if not present
    try:
        tag_distro_perm = Permission.by_name(u'tag_distro')
    except NoResultFound:
        tag_distro_perm = Permission(u'tag_distro')
        admin.permissions.append(tag_distro_perm)

    # Create stop_task perm if not present
    try:
        stop_task_perm = Permission.by_name(u'stop_task')
    except NoResultFound:
        stop_task_perm = Permission(u'stop_task')
        lab_controller.permissions.append(stop_task_perm)
        admin.permissions.append(stop_task_perm)

    # Create secret_visible perm if not present
    try:
        secret_visible_perm = Permission.by_name(u'secret_visible')
    except NoResultFound:
        secret_visible_perm = Permission(u'secret_visible')
        lab_controller.permissions.append(secret_visible_perm)
        admin.permissions.append(secret_visible_perm)

    #Setup Hypervisors Table
    if Hypervisor.query.count() == 0:
        kvm       = Hypervisor(hypervisor=u'KVM')
        xen       = Hypervisor(hypervisor=u'Xen')
        hyperv    = Hypervisor(hypervisor=u'HyperV')
        vmware    = Hypervisor(hypervisor=u'VMWare')

    #Setup kernel_type Table
    if KernelType.query.count() == 0:
        default  = KernelType(kernel_type=u'default', uboot=False)
        highbank = KernelType(kernel_type=u'highbank', uboot=False)
        imx      = KernelType(kernel_type=u'imx', uboot=False)
        mvebu    = KernelType(kernel_type=u'mvebu', uboot=True)
        omap     = KernelType(kernel_type=u'omap', uboot=False)
        tegra    = KernelType(kernel_type=u'tegra', uboot=False)

    #Setup base Architectures
    if Arch.query.count() == 0:
        i386   = Arch(u'i386')
        x86_64 = Arch(u'x86_64')
        ia64   = Arch(u'ia64')
        ppc    = Arch(u'ppc')
        ppc64  = Arch(u'ppc64')
        s390   = Arch(u's390')
        s390x  = Arch(u's390x')
        armhfp = Arch(u'armhfp')

    #Setup base power types
    if PowerType.query.count() == 0:
        apc_snmp    = PowerType(u'apc_snmp')
        PowerType(u'apc_snmp_then_etherwake')
        bladecenter = PowerType(u'bladecenter')
        bullpap     = PowerType(u'bladepap')
        drac        = PowerType(u'drac')
        ether_wake  = PowerType(u'ether_wake')
        PowerType(u'hyper-v')
        ilo         = PowerType(u'ilo')
        integrity   = PowerType(u'integrity')
        ipmilan     = PowerType(u'ipmilan')
        ipmitool    = PowerType(u'ipmitool')
        lpar        = PowerType(u'lpar')
        rsa         = PowerType(u'rsa')
        virsh       = PowerType(u'virsh')
        wti         = PowerType(u'wti')

    #Setup key types
    if Key.query.count() == 0:
        DISKSPACE       = Key(u'DISKSPACE',True)
        COMMENT         = Key(u'COMMENT')
        CPUFAMILY       = Key(u'CPUFAMILY',True)
        CPUFLAGS        = Key(u'CPUFLAGS')
        CPUMODEL        = Key(u'CPUMODEL')
        CPUMODELNUMBER  = Key(u'CPUMODELNUMBER', True)
        CPUSPEED        = Key(u'CPUSPEED',True)
        CPUVENDOR       = Key(u'CPUVENDOR')
        DISK            = Key(u'DISK',True)
        FORMFACTOR      = Key(u'FORMFACTOR')
        HVM             = Key(u'HVM')
        MEMORY          = Key(u'MEMORY',True)
        MODEL           = Key(u'MODEL')
        MODULE          = Key(u'MODULE')
        NETWORK         = Key(u'NETWORK')
        NR_DISKS        = Key(u'NR_DISKS',True)
        NR_ETH          = Key(u'NR_ETH',True)
        NR_IB           = Key(u'NR_IB',True)
        PCIID           = Key(u'PCIID')
        PROCESSORS      = Key(u'PROCESSORS',True)
        RTCERT          = Key(u'RTCERT')
        SCRATCH         = Key(u'SCRATCH')
        STORAGE         = Key(u'STORAGE')
        USBID           = Key(u'USBID')
        VENDOR          = Key(u'VENDOR')
        XENCERT         = Key(u'XENCERT')
        NETBOOT         = Key(u'NETBOOT_METHOD')

    #Setup ack/nak responses
    if Response.query.count() == 0:
        ACK      = Response(response=u'ack')
        NAK      = Response(response=u'nak')

    if RetentionTag.query.count() == 0:
        SCRATCH         = RetentionTag(tag=u'scratch', is_default=1, expire_in_days=30)
        SIXTYDAYS       = RetentionTag(tag=u'60days', needs_product=False, expire_in_days=60)
        ONETWENTYDAYS   = RetentionTag(tag=u'120days', needs_product=False, expire_in_days=120)
        ACTIVE          = RetentionTag(tag=u'active', needs_product=True)
        AUDIT           = RetentionTag(tag=u'audit', needs_product=True)

    config_items = [
        # name, description, numeric
        (u'root_password', u'Plaintext root password for provisioned systems', False),
        (u'root_password_validity', u"Maximum number of days a user's root password is valid for", True),
        (u'default_guest_memory', u"Default memory (MB) for dynamic guest provisioning", True),
        (u'default_guest_disk_size', u"Default disk size (GB) for dynamic guest provisioning", True),
        (u'guest_name_prefix', u'Prefix for names of dynamic guests in oVirt', False),
    ]
    for name, description, numeric in config_items:
        try:
            ConfigItem.by_name(name)
        except NoResultFound:
            ConfigItem(name=name, description=description, numeric=numeric)
    session.flush()
    if ConfigItem.by_name(u'root_password').current_value() is None:
        ConfigItem.by_name(u'root_password').set(u'beaker', user=admin.users[0])

    session.commit()
    session.close()
Example #42
_threadpool_executor = None

def _get_threadpool_executor():
    # Module-level cache: the pool is created lazily on first use and
    # shared by all callers.
    global _threadpool_executor
    if _threadpool_executor is None:
        _threadpool_executor = concurrent.futures.ThreadPoolExecutor(max_workers=5)
    return _threadpool_executor
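The snippet above was truncated mid-function; the getter name _get_threadpool_executor is inferred from the return statement. A short usage sketch (both functions below are hypothetical, just to show the submit() call):

def report_metric_async(name, value):
    # Runs the blocking call on the shared pool; submit() returns a
    # concurrent.futures.Future that callers may wait on or ignore.
    return _get_threadpool_executor().submit(_send_metric, name, value)

def _send_metric(name, value):
    # Placeholder for whatever blocking work needs off-loading.
    print('%s=%r' % (name, value))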

def update_dirty_jobs():
    work_done = False
    dirty_jobs = Job.query.filter(Job.dirty_version != Job.clean_version)
    for job_id, in dirty_jobs.values(Job.id):
        session.begin()
        try:
            update_dirty_job(job_id)
            session.commit()
        except Exception:
            log.exception('Error in update_dirty_job(%s)', job_id)
            session.rollback()
        finally:
            session.close()
        work_done = True
        if event.is_set():
            break
    return work_done

def update_dirty_job(job_id):
    log.debug('Updating dirty job %s', job_id)
    job = Job.by_id(job_id)
    job.update_status()

def process_new_recipes(*args):
    work_done = False
    recipes = MachineRecipe.query\
            .join(MachineRecipe.recipeset).join(RecipeSet.job)\
            .filter(Recipe.status == TaskStatus.new)