Example no. 1
def process_new_recipe(recipe_id):
    recipe = MachineRecipe.by_id(recipe_id)
    if not recipe.distro_tree:
        log.info("recipe ID %s moved from New to Aborted", recipe.id)
        recipe.recipeset.abort(u'Recipe ID %s does not have a distro tree' %
                               recipe.id)
        return
    recipe.systems = []

    # Do the query twice.

    # First query verifies that the distro tree
    # exists in at least one lab that has a matching system.
    systems = recipe.candidate_systems(only_in_lab=True)
    # Second query picks up all possible systems so that as
    # trees appear in other labs those systems will be
    # available.
    all_systems = recipe.candidate_systems(only_in_lab=False)
    # Based on the queries above: only populate the recipe if the first
    # (in-lab) query matched, but append every system from all_systems.
    log.debug('Counting candidate systems for recipe %s', recipe.id)
    if systems.count():
        log.debug('Computing all candidate systems for recipe %s', recipe.id)
        for system in all_systems:
            # Add matched systems to recipe.
            recipe.systems.append(system)

    # If the recipe only matches one system then bump its priority.
    if config.get('beaker.priority_bumping_enabled', True) and len(
            recipe.systems) == 1:
        old_prio = recipe.recipeset.priority
        try:
            new_prio = TaskPriority.by_index(TaskPriority.index(old_prio) + 1)
        except IndexError:
            # We may already be at the highest priority
            pass
        else:
            log.info("recipe ID %s matches one system, bumping priority" %
                     recipe.id)
            recipe.recipeset.record_activity(user=None,
                                             service=u'Scheduler',
                                             action=u'Changed',
                                             field=u'Priority',
                                             old=unicode(old_prio),
                                             new=unicode(new_prio))
            recipe.recipeset.priority = new_prio
    recipe.virt_status = recipe.check_virtualisability()
    if not recipe.systems and not _virt_possible(recipe):
        log.info("recipe ID %s moved from New to Aborted" % recipe.id)
        recipe.recipeset.abort(u'Recipe ID %s does not match any systems' %
                               recipe.id)
        return
    recipe.process()
    log.info("recipe ID %s moved from New to Processed" % recipe.id)
    for guestrecipe in recipe.guests:
        guestrecipe.process()
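
The priority bump above relies on TaskPriority.index() / TaskPriority.by_index() raising IndexError once the top of the scale is reached, which is what caps the bump at the highest priority. A minimal stand-alone sketch of that behaviour, using a hypothetical plain-list priority scale rather than Beaker's real TaskPriority class:

# Hypothetical stand-in for the priority scale, ordered lowest to highest.
PRIORITIES = ['Low', 'Medium', 'Normal', 'High', 'Urgent']

def bump_priority(current):
    # Mirror the try/except IndexError pattern in process_new_recipe():
    # step one level up, or stay put when already at the highest priority.
    try:
        return PRIORITIES[PRIORITIES.index(current) + 1]
    except IndexError:
        return current

# bump_priority('Normal') -> 'High'
# bump_priority('Urgent') -> 'Urgent'
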
Example no. 2
def abort_dead_recipe(recipe_id):
    recipe = MachineRecipe.by_id(recipe_id)
    # There are two ways to have your recipe aborted: no distros,
    # or no automated systems available
    if len(recipe.distro_tree.lab_controller_assocs) == 0:
        msg = u"R:%s does not have a valid distro tree, aborting." % recipe.id
        log.info(msg)
        recipe.recipeset.abort(msg)
    else:
        msg = u"R:%s does not match any systems, aborting." % recipe.id
        log.info(msg)
        recipe.recipeset.abort(msg)
Example no. 3
def abort_dead_recipe(recipe_id):
    recipe = MachineRecipe.by_id(recipe_id)
    # There are two ways to have your recipe aborted: no distros,
    # or no automated systems available
    if recipe.distro_tree and len(recipe.distro_tree.lab_controller_assocs) == 0:
        msg = u"R:%s does not have a valid distro tree, aborting." % recipe.id
        log.info(msg)
        recipe.recipeset.abort(msg)
    else:
        msg = u"R:%s does not match any systems, aborting." % recipe.id
        log.info(msg)
        recipe.recipeset.abort(msg)
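
The only difference between Example no. 2 and Example no. 3 is the extra recipe.distro_tree guard. Without it, a recipe whose distro tree is missing raises AttributeError instead of being aborted cleanly. A small sketch of the failure mode, using a hypothetical stand-in recipe object:

class FakeRecipe(object):
    # Hypothetical stand-in: a recipe whose distro tree was never resolved.
    distro_tree = None

recipe = FakeRecipe()

# Example no. 2's condition dereferences None and blows up:
#     len(recipe.distro_tree.lab_controller_assocs)  -> AttributeError
# Example no. 3 short-circuits on the falsy distro_tree instead, so the
# recipe falls through to the "does not match any systems" abort branch.
no_valid_tree = recipe.distro_tree and len(recipe.distro_tree.lab_controller_assocs) == 0
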
Example no. 4
def process_new_recipe(recipe_id):
    recipe = MachineRecipe.by_id(recipe_id)
    if not recipe.distro_tree:
        log.info("recipe ID %s moved from New to Aborted", recipe.id)
        recipe.recipeset.abort(u'Recipe ID %s does not have a distro tree' % recipe.id)
        return
    recipe.systems = []

    # Do the query twice.

    # First query verifies that the distro tree 
    # exists in at least one lab that has a matching system.
    systems = recipe.distro_tree.systems_filter(
                                recipe.recipeset.job.owner,
                                recipe.host_requires,
                                only_in_lab=True)
    # Second query picks up all possible systems so that as 
    # trees appear in other labs those systems will be 
    # available.
    all_systems = recipe.distro_tree.systems_filter(
                                recipe.recipeset.job.owner,
                                recipe.host_requires,
                                only_in_lab=False)
    # Based on the queries above: only populate the recipe if the first
    # (in-lab) query matched, but append every system from all_systems.
    if systems.count():
        for system in all_systems:
            # Add matched systems to recipe.
            recipe.systems.append(system)

    # If the recipe only matches one system then bump its priority.
    if len(recipe.systems) == 1:
        try:
            log.info("recipe ID %s matches one system, bumping priority" % recipe.id)
            recipe.recipeset.priority = TaskPriority.by_index(
                    TaskPriority.index(recipe.recipeset.priority) + 1)
        except IndexError:
            # We may already be at the highest priority
            pass
    recipe.virt_status = recipe.check_virtualisability()
    if not recipe.systems and not _virt_possible(recipe):
        log.info("recipe ID %s moved from New to Aborted" % recipe.id)
        recipe.recipeset.abort(u'Recipe ID %s does not match any systems' % recipe.id)
        return
    recipe.process()
    log.info("recipe ID %s moved from New to Processed" % recipe.id)
    for guestrecipe in recipe.guests:
        guestrecipe.process()
Example no. 5
def process_new_recipe(recipe_id):
    recipe = MachineRecipe.by_id(recipe_id)
    recipe.systems = []

    # Do the query twice.

    # First query verifies that the distro tree 
    # exists in at least one lab that has a matching system.
    # But if it's a user-supplied distro, we don't have a
    # distro tree to match the lab against, so it will return
    # all possible systems.
    systems = recipe.candidate_systems(only_in_lab=True)
    # Second query picks up all possible systems so that as 
    # trees appear in other labs those systems will be 
    # available.
    all_systems = recipe.candidate_systems(only_in_lab=False)
    # Based on the queries above: only populate the recipe if the first
    # (in-lab) query matched, but append every system from all_systems.
    log.debug('Counting candidate systems for recipe %s', recipe.id)
    if systems.count():
        log.debug('Computing all candidate systems for recipe %s', recipe.id)
        for system in all_systems:
            # Add matched systems to recipe.
            recipe.systems.append(system)

    # If the recipe only matches one system then bump its priority.
    if config.get('beaker.priority_bumping_enabled', True) and len(recipe.systems) == 1:
        old_prio = recipe.recipeset.priority
        try:
            new_prio = TaskPriority.by_index(TaskPriority.index(old_prio) + 1)
        except IndexError:
            # We may already be at the highest priority
            pass
        else:
            log.info("recipe ID %s matches one system, bumping priority" % recipe.id)
            recipe.recipeset.record_activity(user=None, service=u'Scheduler',
                    action=u'Changed', field=u'Priority',
                    old=unicode(old_prio), new=unicode(new_prio))
            recipe.recipeset.priority = new_prio
    recipe.virt_status = recipe.check_virtualisability()
    if not recipe.systems and not _virt_possible(recipe):
        log.info("recipe ID %s moved from New to Aborted" % recipe.id)
        recipe.recipeset.abort(u'Recipe ID %s does not match any systems' % recipe.id)
        return
    recipe.process()
    log.info("recipe ID %s moved from New to Processed" % recipe.id)
    for guestrecipe in recipe.guests:
        guestrecipe.process()
Example no. 6
def schedule_queued_recipe(recipe_id, guest_recipe_id=None):
    guest_recipe = aliased(Recipe)
    guest_distros_map = aliased(LabControllerDistroTree)
    guest_labcontroller = aliased(LabController)
    # This query will return all the systems that a recipe is
    # able to run on. A system is deemed eligible if:
    # * The recipe's distro tree is available to the system's lab controller.
    # * The system is available (see the filter criteria).
    # * If it's a host recipe, the system is on a lab controller
    #   that can access the distro tree of both the host recipe
    #   and the guest recipe.
    systems = System.query.join(System.queued_recipes) \
        .outerjoin(System.cpu) \
        .join(Recipe.recipeset, RecipeSet.job) \
        .join(System.lab_controller, LabController._distro_trees)\
        .join((DistroTree,
            and_(LabControllerDistroTree.distro_tree_id ==
                DistroTree.id, Recipe.distro_tree_id == DistroTree.id)))\
        .outerjoin((machine_guest_map,
            Recipe.id == machine_guest_map.c.machine_recipe_id))\
        .outerjoin((guest_recipe,
            machine_guest_map.c.guest_recipe_id == guest_recipe.id ))\
        .outerjoin((guest_distros_map,
            guest_recipe.distro_tree_id == guest_distros_map.distro_tree_id))\
        .outerjoin((guest_labcontroller,
            guest_distros_map.lab_controller_id == guest_labcontroller.id))\
        .filter(Recipe.id == recipe_id) \
        .filter(or_(guest_recipe.id == guest_recipe_id,
            guest_recipe.id == None))\
        .filter(and_(System.user == None,
                or_(guest_distros_map.id == None,
                    and_(guest_distros_map.id != None,
                        guest_labcontroller.id == LabController.id,
                        ),
                   ),
                LabController.disabled == False,
                or_(System.loan_id == None,
                    System.loan_id == Job.owner_id,
                   ),
                    ), # and
                )

    # We reapply this filter here in case a peer recipe has locked the recipe
    # set into a particular lab controller earlier in this scheduling pass.
    recipe = MachineRecipe.by_id(recipe_id)
    if recipe.recipeset.lab_controller:
        systems = systems.filter(
                     System.lab_controller==recipe.recipeset.lab_controller)

    # Something earlier in this pass meant we can't schedule this recipe
    # right now after all. We'll try again next pass.
    if not systems.count():
        return

    # Order systems by owner, then Group, finally shared for everyone.
    # FIXME Make this configurable, so that a user can specify their scheduling
    # preference from the job. The order is implemented; pools still need doing.
    # <recipe>
    #  <autopick order='sequence|random'>
    #   <pool>owner</pool>
    #   <pool>groups</pool>
    #   <pool>public</pool>
    #  </autopick>
    # </recipe>
    user = recipe.recipeset.job.owner
    if True: #FIXME if pools are defined add them here in the order requested.
        systems = System.scheduler_ordering(user, query=systems)
    if recipe.autopick_random:
        system = systems[random.randrange(0,systems.count())]
    else:
        system = systems.first()

    log.debug("System : %s is available for Recipe %s" % (system, recipe.id))
    # Check to see if the user still has proper permissions to use the system.
    # Remember, the mapping of available systems could have happened hours or
    # even days ago, and groups or loans could have been put in place since.
    if not recipe.candidate_systems().filter(System.id == system.id).first():
        log.debug("System : %s recipe: %s no longer has access. removing" % (system, 
                                                                             recipe.id))
        recipe.systems.remove(system)
        return

    recipe.resource = SystemResource(system=system)
    # Reserving the system may fail here if someone stole it out from
    # underneath us, but that is fine...
    recipe.resource.allocate()
    recipe.schedule()
    recipe.createRepo()
    recipe.recipeset.lab_controller = system.lab_controller
    recipe.systems = []
    # Create the watchdog without an Expire time.
    log.debug("Created watchdog for recipe id: %s and system: %s" % (recipe.id, system))
    recipe.watchdog = Watchdog()
    log.info("recipe ID %s moved from Queued to Scheduled" % recipe.id)

    for guestrecipe in recipe.guests:
        guestrecipe.resource = GuestResource()
        guestrecipe.resource.allocate()
        guestrecipe.schedule()
        guestrecipe.createRepo()
        guestrecipe.watchdog = Watchdog()
        log.info('recipe ID %s guest %s moved from Queued to Scheduled',
                recipe.id, guestrecipe.id)
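
The <autopick> XML in the comment block above is only a sketch of the intended configuration; nothing in this function parses it yet (hence the FIXME). A hypothetical helper, not part of Beaker, showing how an order='sequence|random' attribute could drive the choice between systems.first() and a random pick over a plain list of candidates:

import random
import xml.etree.ElementTree as ET

RECIPE_XML = """
<recipe>
  <autopick order='random'>
    <pool>owner</pool>
    <pool>groups</pool>
    <pool>public</pool>
  </autopick>
</recipe>
"""

def pick_system(candidates, recipe_xml=RECIPE_XML):
    # Read the <autopick> element; fall back to sequential picking when the
    # element or its order attribute is missing.
    autopick = ET.fromstring(recipe_xml).find('autopick')
    order = autopick.get('order', 'sequence') if autopick is not None else 'sequence'
    if order == 'random':
        return random.choice(candidates)
    return candidates[0]
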
Example no. 7
def schedule_queued_recipes(*args):
    session.begin()
    try:
        # This query returns a queued host recipe and the guest which has
        # the most recent distro tree. It is to be used as a derived table.
        latest_guest_distro = select([machine_guest_map.c.machine_recipe_id.label('host_id'),
            func.max(DistroTree.date_created).label('latest_distro_date')],
            from_obj=[machine_guest_map.join(GuestRecipe.__table__,
                    machine_guest_map.c.guest_recipe_id==GuestRecipe.__table__.c.id). \
                join(Recipe.__table__).join(DistroTree.__table__)],
            whereclause=Recipe.status=='Queued',
            group_by=machine_guest_map.c.machine_recipe_id).alias()

        hosts_lab_controller_distro_map = aliased(LabControllerDistroTree)
        hosts_distro_tree = aliased(DistroTree)
        guest_recipe = aliased(Recipe)
        guests_distro_tree = aliased(DistroTree)
        guests_lab_controller = aliased(LabController)

        # This query will return queued recipes that are eligible to be scheduled.
        # They are determined to be eligible if:
        # * They are clean
        # * There are systems available (see the filter criteria) in lab controllers where
        #   the recipe's distro tree is available.
        # * If it is a host recipe, the most recently created distro of all
        #   the guest recipe's distros is available in at least one of the same
        #   lab controllers as that of the host's distro tree.
        #
        # Also note that we do not try to handle the situation where the guest and host
        # never have a common lab controller. In that case the host and guest would stay
        # queued until the situation was rectified.
        recipes = MachineRecipe.query\
            .join(Recipe.recipeset, RecipeSet.job)\
            .filter(Job.dirty_version == Job.clean_version)\
            .outerjoin((guest_recipe, MachineRecipe.guests))\
            .outerjoin((guests_distro_tree, guest_recipe.distro_tree_id == guests_distro_tree.id))\
            .outerjoin((latest_guest_distro,
                and_(latest_guest_distro.c.host_id == MachineRecipe.id,
                    latest_guest_distro.c.latest_distro_date == \
                    guests_distro_tree.date_created)))\
            .outerjoin(guests_distro_tree.lab_controller_assocs, guests_lab_controller)\
            .join(Recipe.systems)\
            .join((hosts_distro_tree, hosts_distro_tree.id == MachineRecipe.distro_tree_id))\
            .join((hosts_lab_controller_distro_map, hosts_distro_tree.lab_controller_assocs),
                (LabController, and_(
                    hosts_lab_controller_distro_map.lab_controller_id == LabController.id,
                    System.lab_controller_id == LabController.id)))\
            .filter(
                and_(Recipe.status == TaskStatus.queued,
                    System.user == None,
                    LabController.disabled == False,
                    or_(
                        RecipeSet.lab_controller == None,
                        RecipeSet.lab_controller_id == System.lab_controller_id,
                       ),
                    or_(
                        System.loan_id == None,
                        System.loan_id == Job.owner_id,
                       ),
                    or_(
                        # We either have no guest
                        guest_recipe.id == None,
                        # Or we have a guest of which the latest
                        # is in a common lab controller.
                        and_(guests_lab_controller.id == LabController.id,
                            latest_guest_distro.c.latest_distro_date != None
                            ),
                        ) # or
                    ) # and
                  )
        # Get out of here if we have no recipes
        if not recipes.count():
            return False
        # This should be the guest recipe with the latest distro.
        # We return it in this query, to save us from re-running the
        # derived table query in schedule_queued_recipe()
        recipes = recipes.add_column(guest_recipe.id)
        # Effective priority is given in the following order:
        # * Multi-host recipes with already scheduled siblings
        # * Priority level (i.e. Normal, High, etc.)
        # * RecipeSet id
        # * Recipe id
        recipes = recipes.order_by(RecipeSet.lab_controller == None). \
            order_by(RecipeSet.priority.desc()). \
            order_by(RecipeSet.id). \
            order_by(MachineRecipe.id)
        # Don't do a GROUP BY before here, it is not needed.
        recipes = recipes.group_by(MachineRecipe.id)
        log.debug("Entering schedule_queued_recipes")
        for recipe_id, guest_recipe_id in recipes.values(MachineRecipe.id, guest_recipe.id):
            session.begin(nested=True)
            try:
                schedule_queued_recipe(recipe_id, guest_recipe_id)
                session.commit()
            except (StaleSystemUserException, InsufficientSystemPermissions,
                 StaleTaskStatusException), e:
                # Either the system user changed before allocation,
                # or the system permissions changed before allocation,
                # or something has already moved our status on from Queued.
                log.warn(str(e))
                session.rollback()
            except Exception, e:
                log.exception('Error in schedule_queued_recipe(%s)', recipe_id)
                session.rollback()
                session.begin(nested=True)
                try:
                    recipe = MachineRecipe.by_id(recipe_id)
                    recipe.recipeset.abort(u"Aborted in schedule_queued_recipe: %s" % e)
                    session.commit()
                except Exception, e:
                    log.exception("Error during error handling in schedule_queued_recipe: %s" % e)
                    session.rollback()
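
The per-recipe error handling above relies on nested transactions: each recipe gets its own savepoint, so a failure rolls back only that recipe's work and the outer scheduling pass carries on. A minimal sketch of the same savepoint-per-item pattern, assuming a plain SQLAlchemy Session (the code above goes through the TurboGears session wrapper instead) and hypothetical handle_one/abort_one callables:

def process_each(session, items, handle_one, abort_one):
    # Savepoint per item: a failure in handle_one() rolls back only that
    # item's changes; the enclosing transaction and the loop keep going.
    for item in items:
        nested = session.begin_nested()
        try:
            handle_one(item)
            nested.commit()
        except Exception as exc:
            nested.rollback()
            # Mirror the fallback in schedule_queued_recipes(): record the
            # failure under a fresh savepoint of its own.
            nested = session.begin_nested()
            try:
                abort_one(item, exc)
                nested.commit()
            except Exception:
                nested.rollback()
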
Example no. 8
def schedule_queued_recipe(recipe_id, guest_recipe_id=None):
    log.debug('Selecting a system for recipe %s', recipe_id)
    guest_recipe = aliased(Recipe)
    guest_distros_map = aliased(LabControllerDistroTree)
    guest_labcontroller = aliased(LabController)
    # This query will return all the systems that a recipe is
    # able to run on. A system is deemed eligible if:
    # * The recipe's distro tree is available to the system's lab controller.
    # * The system is available (see the filter criteria).
    # * If it's a host recipe, the system is on a lab controller
    #   that can access the distro tree of both the host recipe
    #   and the guest recipe.
    systems = System.query.join(System.queued_recipes) \
        .outerjoin(System.cpu) \
        .join(Recipe.recipeset, RecipeSet.job) \
        .join(System.lab_controller, LabController._distro_trees)\
        .join((DistroTree,
            and_(LabControllerDistroTree.distro_tree_id ==
                DistroTree.id, Recipe.distro_tree_id == DistroTree.id)))\
        .outerjoin((machine_guest_map,
            Recipe.id == machine_guest_map.c.machine_recipe_id))\
        .outerjoin((guest_recipe,
            machine_guest_map.c.guest_recipe_id == guest_recipe.id ))\
        .outerjoin((guest_distros_map,
            guest_recipe.distro_tree_id == guest_distros_map.distro_tree_id))\
        .outerjoin((guest_labcontroller,
            guest_distros_map.lab_controller_id == guest_labcontroller.id))\
        .filter(Recipe.id == recipe_id) \
        .filter(or_(guest_recipe.id == guest_recipe_id,
            guest_recipe.id == None))\
        .filter(and_(System.user == None,
                or_(guest_distros_map.id == None,
                    and_(guest_distros_map.id != None,
                        guest_labcontroller.id == LabController.id,
                        ),
                   ),
                LabController.disabled == False,
                or_(System.loan_id == None,
                    System.loan_id == Job.owner_id,
                   ),
                    ), # and
                )

    # We reapply this filter here in case a peer recipe has locked the recipe
    # set into a particular lab controller earlier in this scheduling pass.
    recipe = MachineRecipe.by_id(recipe_id)
    if recipe.recipeset.lab_controller:
        systems = systems.filter(
            System.lab_controller == recipe.recipeset.lab_controller)

    # Something earlier in this pass meant we can't schedule this recipe
    # right now after all. We'll try again next pass.
    if not systems.count():
        return

    # Order systems by owner, then Group, finally shared for everyone.
    # FIXME Make this configurable, so that a user can specify their scheduling
    # preference from the job. The order is implemented; pools still need doing.
    # <recipe>
    #  <autopick order='sequence|random'>
    #   <pool>owner</pool>
    #   <pool>groups</pool>
    #   <pool>public</pool>
    #  </autopick>
    # </recipe>
    user = recipe.recipeset.job.owner
    if True:  #FIXME if pools are defined add them here in the order requested.
        systems = System.scheduler_ordering(user, query=systems)
    if recipe.autopick_random:
        system = systems[random.randrange(0, systems.count())]
    else:
        system = systems.first()

    log.debug("System : %s is available for Recipe %s" % (system, recipe.id))
    # Check to see if the user still has proper permissions to use the system.
    # Remember, the mapping of available systems could have happened hours or
    # even days ago, and groups or loans could have been put in place since.
    if not recipe.candidate_systems().filter(System.id == system.id).first():
        log.debug("System : %s recipe: %s no longer has access. removing" %
                  (system, recipe.id))
        recipe.systems.remove(system)
        return

    recipe.resource = SystemResource(system=system)
    # Reserving the system may fail here if someone stole it out from
    # underneath us, but that is fine...
    recipe.resource.allocate()
    recipe.schedule()
    recipe.createRepo()
    recipe.recipeset.lab_controller = system.lab_controller
    recipe.systems = []
    # Create the watchdog without an Expire time.
    log.debug("Created watchdog for recipe id: %s and system: %s" %
              (recipe.id, system))
    recipe.watchdog = Watchdog()
    log.info("recipe ID %s moved from Queued to Scheduled" % recipe.id)

    for guestrecipe in recipe.guests:
        guestrecipe.resource = GuestResource()
        guestrecipe.resource.allocate()
        guestrecipe.schedule()
        guestrecipe.createRepo()
        guestrecipe.watchdog = Watchdog()
        log.info('recipe ID %s guest %s moved from Queued to Scheduled',
                 recipe.id, guestrecipe.id)
Example no. 9
def schedule_queued_recipes(*args):
    work_done = False
    session.begin()
    try:
        # This query returns a queued host recipe and the guest which has
        # the most recent distro tree. It is to be used as a derived table.
        latest_guest_distro = select([machine_guest_map.c.machine_recipe_id.label('host_id'),
            func.max(DistroTree.date_created).label('latest_distro_date')],
            from_obj=[machine_guest_map.join(GuestRecipe.__table__,
                    machine_guest_map.c.guest_recipe_id==GuestRecipe.__table__.c.id). \
                join(Recipe.__table__).join(DistroTree.__table__)],
            whereclause=Recipe.status=='Queued',
            group_by=machine_guest_map.c.machine_recipe_id).alias()

        hosts_lab_controller_distro_map = aliased(LabControllerDistroTree)
        hosts_distro_tree = aliased(DistroTree)
        guest_recipe = aliased(Recipe)
        guests_distro_tree = aliased(DistroTree)
        guests_lab_controller = aliased(LabController)

        # This query will return queued recipes that are eligible to be scheduled.
        # They are determined to be eligible if:
        # * They are clean
        # * There are systems available (see the filter criteria) in lab controllers where
        #   the recipe's distro tree is available.
        # * If it is a host recipe, the most recently created distro of all
        #   the guest recipe's distros is available in at least one of the same
        #   lab controllers as that of the host's distro tree.
        #
        # Also note that we do not try to handle the situation where the guest and host
        # never have a common lab controller. In that case the host and guest would stay
        # queued until the situation was rectified.
        recipes = MachineRecipe.query\
            .join(Recipe.recipeset, RecipeSet.job)\
            .filter(Job.dirty_version == Job.clean_version)\
            .outerjoin((guest_recipe, MachineRecipe.guests))\
            .outerjoin((guests_distro_tree, guest_recipe.distro_tree_id == guests_distro_tree.id))\
            .outerjoin((latest_guest_distro,
                and_(latest_guest_distro.c.host_id == MachineRecipe.id,
                    latest_guest_distro.c.latest_distro_date == \
                    guests_distro_tree.date_created)))\
            .outerjoin(guests_distro_tree.lab_controller_assocs, guests_lab_controller)\
            .join(Recipe.systems)\
            .join((hosts_distro_tree, hosts_distro_tree.id == MachineRecipe.distro_tree_id))\
            .join((hosts_lab_controller_distro_map, hosts_distro_tree.lab_controller_assocs),
                (LabController, and_(
                    hosts_lab_controller_distro_map.lab_controller_id == LabController.id,
                    System.lab_controller_id == LabController.id)))\
            .filter(
                and_(Recipe.status == TaskStatus.queued,
                    System.user == None,
                    LabController.disabled == False,
                    or_(
                        RecipeSet.lab_controller == None,
                        RecipeSet.lab_controller_id == System.lab_controller_id,
                       ),
                    or_(
                        System.loan_id == None,
                        System.loan_id == Job.owner_id,
                       ),
                    or_(
                        # We either have no guest
                        guest_recipe.id == None,
                        # Or we have a guest of which the latest
                        # is in a common lab controller.
                        and_(guests_lab_controller.id == LabController.id,
                            latest_guest_distro.c.latest_distro_date != None
                            ),
                        ) # or
                    ) # and
                  )
        # This should be the guest recipe with the latest distro.
        # We return it in this query, to save us from re-running the
        # derived table query in schedule_queued_recipe()
        recipes = recipes.add_column(guest_recipe.id)
        # Effective priority is given in the following order:
        # * Multi-host recipes with already scheduled siblings
        # * Priority level (i.e. Normal, High, etc.)
        # * RecipeSet id
        # * Recipe id
        recipes = recipes.order_by(RecipeSet.lab_controller == None). \
            order_by(RecipeSet.priority.desc()). \
            order_by(RecipeSet.id). \
            order_by(MachineRecipe.id)
        # Don't do a GROUP BY before here, it is not needed.
        recipes = recipes.group_by(MachineRecipe.id)
        for recipe_id, guest_recipe_id in recipes.values(
                MachineRecipe.id, guest_recipe.id):
            session.begin(nested=True)
            try:
                schedule_queued_recipe(recipe_id, guest_recipe_id)
                session.commit()
            except (StaleSystemUserException, InsufficientSystemPermissions,
                    StaleTaskStatusException), e:
                # Either the system user changed before allocation,
                # or the system permissions changed before allocation,
                # or something has already moved our status on from Queued.
                log.warn(str(e))
                session.rollback()
            except Exception, e:
                log.exception('Error in schedule_queued_recipe(%s)', recipe_id)
                session.rollback()
                session.begin(nested=True)
                try:
                    recipe = MachineRecipe.by_id(recipe_id)
                    recipe.recipeset.abort(
                        u"Aborted in schedule_queued_recipe: %s" % e)
                    session.commit()
                except Exception, e:
                    log.exception(
                        "Error during error handling in schedule_queued_recipe: %s"
                        % e)
                    session.rollback()
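
The latest_guest_distro construct above is a standard greatest-per-group derived table: aggregate MAX(date_created) per host recipe, then join the aggregate back so that only the row carrying the newest guest distro survives. A stripped-down sketch of the same construction, kept in the legacy select([...]) style used above, with hypothetical stand-in tables rather than Beaker's models:

from sqlalchemy import (MetaData, Table, Column, Integer, DateTime,
                        select, func, and_)

metadata = MetaData()
# Hypothetical stand-ins for the machine/guest mapping and the distro trees.
guest_map = Table('guest_map', metadata,
                  Column('host_id', Integer), Column('guest_id', Integer))
distro_tree = Table('distro_tree', metadata,
                    Column('id', Integer), Column('guest_id', Integer),
                    Column('date_created', DateTime))

# Derived table: the newest guest distro date for each host.
latest = select([guest_map.c.host_id,
                 func.max(distro_tree.c.date_created).label('latest_distro_date')],
                from_obj=[guest_map.join(distro_tree,
                    guest_map.c.guest_id == distro_tree.c.guest_id)],
                group_by=guest_map.c.host_id).alias()

# Join the aggregate back: only distro_tree rows whose date matches the
# per-host maximum survive, which is what the scheduler's outerjoin against
# latest_guest_distro achieves for guest recipes.
newest_per_host = select([guest_map.c.host_id, distro_tree.c.id],
                         from_obj=[guest_map
                             .join(distro_tree,
                                   guest_map.c.guest_id == distro_tree.c.guest_id)
                             .join(latest, and_(
                                 latest.c.host_id == guest_map.c.host_id,
                                 latest.c.latest_distro_date == distro_tree.c.date_created))])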