def run_policy_once():
    """Run a single pass of the model-policy engine.

    Polices every object (live or deleted) that is "dirty" — never policed,
    or updated since it was last policed — then purges deleted objects of
    reaped model classes that no longer have any dependents.
    """
    from core.models import Instance, Slice, Controller, Network, User, \
        SlicePrivilege, Site, SitePrivilege, Image, ControllerSlice, \
        ControllerUser, ControllerSite

    models = [Instance, Slice, Controller, Network, User, SlicePrivilege,
              Site, SitePrivilege, Image, ControllerSlice, ControllerSite,
              ControllerUser]
    objects = []
    deleted_objects = []

    # Collect dirty objects: policed timestamp older than updated, or never
    # policed at all.
    for m in models:
        res = m.objects.filter(Q(policed__lt=F('updated')) | Q(policed=None))
        objects.extend(res)
        res = m.deleted_objects.filter(Q(policed__lt=F('updated')) | Q(policed=None))
        deleted_objects.extend(res)

    for o in objects:
        execute_model_policy(o, o.deleted)

    for o in deleted_objects:
        execute_model_policy(o, True)

    # Reap non-sync'd models here: purge deleted objects that have no
    # remaining inverse dependencies.
    reaped = [Slice]

    for m in reaped:
        dobjs = m.deleted_objects.all()
        for d in dobjs:
            deps = walk_inv_deps(noop, d)
            if not deps:
                print('Purging object %r' % d)
                d.delete(purge=True)

    try:
        reset_queries()
    # FIX: was a bare `except:`, which also swallows KeyboardInterrupt and
    # SystemExit; narrow to Exception while keeping the best-effort logging.
    except Exception:
        # this shouldn't happen, but in case it does, catch it...
        logger.log_exc("exception in reset_queries")
def run_policy():
    """Daemon loop for the model-policy engine.

    Repeatedly polices all dirty objects (see run_policy_once semantics),
    reaps orphaned deleted objects, and sleeps briefly so each iteration
    takes at least one second.
    """
    from core.models import Sliver, Slice, Controller, Network, User, \
        SlicePrivilege, Site, SitePrivilege, Image, ControllerSlice, \
        ControllerUser, ControllerSite

    while True:
        start = time.time()
        models = [Sliver, Slice, Controller, Network, User, SlicePrivilege,
                  Site, SitePrivilege, Image, ControllerSlice, ControllerSite,
                  ControllerUser]
        objects = []
        deleted_objects = []

        # Collect dirty objects: never policed, or updated after last policing.
        for m in models:
            res = m.objects.filter(Q(policed__lt=F('updated')) | Q(policed=None))
            objects.extend(res)
            res = m.deleted_objects.filter(Q(policed__lt=F('updated')) | Q(policed=None))
            deleted_objects.extend(res)

        for o in objects:
            execute_model_policy(o, o.deleted)

        for o in deleted_objects:
            execute_model_policy(o, True)

        # Reap non-sync'd models here: purge deleted objects with no
        # remaining inverse dependencies.
        reaped = [Slice]

        for m in reaped:
            dobjs = m.deleted_objects.all()
            for d in dobjs:
                deps = walk_inv_deps(noop, d)
                if not deps:
                    print('Purging object %r' % d)
                    d.delete(purge=True)

        try:
            reset_queries()
        # FIX: was a bare `except:` — would also swallow KeyboardInterrupt,
        # making the daemon loop impossible to interrupt here.
        except Exception:
            # this shouldn't happen, but in case it does, catch it...
            logger.log_exc("exception in reset_queries")

        # Throttle: ensure each pass takes at least one second.
        if time.time() - start < 1:
            time.sleep(1)
def execute_model_policy(instance, deleted):
    """Dirty *instance*'s dependency neighborhood and run its model policy.

    Looks up `model_policy_<ClassName>` in the model_policies module and
    invokes handle()/handle_delete() as appropriate, then stamps the object
    as policed. Objects whose save() fails are remembered in bad_instances
    and skipped on later passes.
    """
    # Automatic dirtying
    if instance in bad_instances:
        return

    # These are the models whose children get deleted when they are
    delete_policy_models = ['Slice', 'Sliver', 'Network']

    sender_name = instance.__class__.__name__
    policy_name = 'model_policy_%s' % sender_name
    # NOTE: removed unused local `noargs = False` (never read in this block).

    if not deleted:
        walk_inv_deps(update_dep, instance)
        walk_deps(update_wp, instance)
    elif sender_name in delete_policy_models:
        # Only these models cascade a delete policy down to their children.
        walk_inv_deps(delete_if_inactive, instance)

    try:
        policy_handler = getattr(model_policies, policy_name, None)
        logger.error("POLICY HANDLER: %s %s" % (policy_name, policy_handler))
        if policy_handler is not None:
            if deleted:
                try:
                    policy_handler.handle_delete(instance)
                except AttributeError:
                    # Policy module has no handle_delete; that is allowed.
                    pass
            else:
                policy_handler.handle(instance)
    except Exception:
        logger.log_exc("Model Policy Error:")

    try:
        instance.policed = datetime.now()
        instance.save(update_fields=['policed'])
    except Exception:
        # FIX: was logging.error (unconfigured root logger); use the module
        # logger like the rest of this function.
        logger.error('Object %r is defective' % instance)
        bad_instances.append(instance)
def execute_model_policy(instance, deleted):
    """Dirty *instance*'s inverse dependencies and run its model policy.

    Looks up `model_policy_<ClassName>` in the model_policies module; on
    deletion walks inverse dependencies with delete_if_inactive instead.
    Always stamps the object as policed afterwards.
    """
    # Automatic dirtying
    walk_inv_deps(update_dep, instance)

    sender_name = instance.__class__.__name__
    policy_name = 'model_policy_%s' % sender_name
    # NOTE: removed unused local `noargs = False` (never read in this block).

    if deleted:
        walk_inv_deps(delete_if_inactive, instance)
    else:
        try:
            policy_handler = getattr(model_policies, policy_name, None)
            logger.error("POLICY HANDLER: %s %s" % (policy_name, policy_handler))
            if policy_handler is not None:
                policy_handler.handle(instance)
        # FIX: narrowed bare `except:` to Exception, and corrected the
        # misspelled message ("Exceution" -> "Execution").
        except Exception:
            logger.log_exc("Model Policy Error:")
            print("Policy Execution Error")

    instance.policed = datetime.now()
    instance.save(update_fields=['policed'])
def execute_model_policy(instance, deleted):
    """Run the model policy that matches *instance*'s class.

    Marks the object's inverse dependencies dirty, dispatches to
    `model_policy_<ClassName>.handle()` (or walks delete_if_inactive when
    the object was deleted), then records the policing timestamp.
    """
    # Automatic dirtying of everything that depends on this object.
    walk_inv_deps(update_dep, instance)

    cls_name = instance.__class__.__name__
    handler_name = 'model_policy_%s' % cls_name
    noargs = False

    if deleted:
        walk_inv_deps(delete_if_inactive, instance)
    else:
        try:
            handler = getattr(model_policies, handler_name, None)
            logger.error("POLICY HANDLER: %s %s" % (handler_name, handler))
            if handler is not None:
                handler.handle(instance)
        except:
            logger.log_exc("Model Policy Error:")
            print("Policy Exceution Error")

    # Stamp the object so it is not re-policed until its next update.
    instance.policed = datetime.now()
    instance.save(update_fields=['policed'])
def execute_model_policy(instance, deleted):
    """Dirty *instance*'s dependency neighborhood and run its model policy.

    Looks up `model_policy_<ClassName>` in the model_policies module and
    invokes handle()/handle_delete() as appropriate, then stamps the object
    as policed. Objects whose save() fails are remembered in bad_instances
    and skipped on later passes.
    """
    # Automatic dirtying
    if instance in bad_instances:
        return

    # These are the models whose children get deleted when they are
    delete_policy_models = ['Slice', 'Instance', 'Network']

    sender_name = instance.__class__.__name__
    policy_name = 'model_policy_%s' % sender_name
    # NOTE: removed unused local `noargs = False` (never read in this block).

    if not deleted:
        walk_inv_deps(update_dep, instance)
        walk_deps(update_wp, instance)
    elif sender_name in delete_policy_models:
        # Only these models cascade a delete policy down to their children.
        walk_inv_deps(delete_if_inactive, instance)

    try:
        policy_handler = getattr(model_policies, policy_name, None)
        logger.error("POLICY HANDLER: %s %s" % (policy_name, policy_handler))
        if policy_handler is not None:
            if deleted:
                try:
                    policy_handler.handle_delete(instance)
                except AttributeError:
                    # Policy module has no handle_delete; that is allowed.
                    pass
            else:
                policy_handler.handle(instance)
    except Exception:
        logger.log_exc("Model Policy Error:")

    try:
        instance.policed = datetime.now()
        instance.save(update_fields=['policed'])
    except Exception:
        # FIX: was logging.error (unconfigured root logger); use the module
        # logger like the rest of this function.
        logger.error('Object %r is defective' % instance)
        bad_instances.append(instance)
def run(self):
    """Main observer loop.

    Waits for an event (or a 30s timeout), then executes every ordered sync
    step, tracking which steps and which individual objects failed so that
    dependency checks can skip dependents of failed work. Never raises: all
    exceptions are logged and the loop continues.
    """
    if not self.driver.enabled:
        return
    if (self.driver_kind == "openstack") and (not self.driver.has_openstack):
        return

    while True:
        try:
            logger.info('Waiting for event')
            self.wait_for_event(timeout=30)
            logger.info('Observer woke up')

            # Set of whole steps that failed
            failed_steps = []

            # Set of individual objects within steps that failed
            failed_step_objects = set()

            for S in self.ordered_steps:
                step = self.step_lookup[S]
                start_time = time.time()

                sync_step = step(driver=self.driver)
                sync_step.__name__ = step.__name__
                sync_step.dependencies = []
                try:
                    mlist = sync_step.provides
                    for m in mlist:
                        sync_step.dependencies.extend(
                            self.model_dependency_graph[m.__name__])
                except KeyError:
                    # Step provides a model with no entry in the graph.
                    pass
                sync_step.debug_mode = debug_mode

                should_run = False
                try:
                    # Various checks that decide whether
                    # this step runs or not
                    self.check_class_dependency(sync_step, failed_steps)  # dont run Slices if Sites failed
                    self.check_schedule(sync_step)  # dont run sync_network_routes if time since last run < 1 hour
                    should_run = True
                except StepNotReady:
                    # FIX: was logging.info (root logger); use the module
                    # logger like every other message in this loop.
                    logger.info('Step not ready: %s' % sync_step.__name__)
                    failed_steps.append(sync_step)
                except Exception:
                    failed_steps.append(sync_step)

                if should_run:
                    try:
                        logger.info('Executing step %s' % sync_step.__name__)

                        # ********* This is the actual sync step
                        failed_objects = sync_step(failed=list(failed_step_objects))

                        # FIX: duration was computed BEFORE the step executed,
                        # so check_duration always saw ~0; measure afterwards.
                        duration = time.time() - start_time
                        self.check_duration(sync_step, duration)

                        if failed_objects:
                            failed_step_objects.update(failed_objects)
                        self.update_run_time(sync_step)
                    except Exception:
                        # FIX: was failed_steps.append(S) — every other path
                        # records the step object, not its name; keep the
                        # list homogeneous for check_class_dependency.
                        failed_steps.append(sync_step)
            self.save_run_times()
        except Exception:
            logger.log_exc("Exception in observer run loop")
            traceback.print_exc()
def sync_record(self, controller_slice):
    """Sync one ControllerSlice to its backend controller.

    Creates/updates the tenant for the slice via the Ansible template, and —
    the first time a tenant id is obtained — pushes the slice's instance
    quota to Nova and records the tenant id on the ControllerSlice.

    Raises:
        Exception: if the slice creator has no account at the controller,
            or if the quota update fails.
    """
    logger.info("sync'ing slice controller %s" % controller_slice)

    if not controller_slice.controller.admin_user:
        logger.info("controller %r has no admin_user, skipping" % controller_slice.controller)
        return

    controller_users = ControllerUser.objects.filter(
        user=controller_slice.slice.creator,
        controller=controller_slice.controller)
    if not controller_users:
        # FIX: corrected misspelled error message ("createor", "not accout").
        raise Exception("slice creator %s has no account at controller %s"
                        % (controller_slice.slice.creator, controller_slice.controller.name))
    else:
        controller_user = controller_users[0]
        roles = ['Admin']

    max_instances = int(controller_slice.slice.max_slivers)
    tenant_fields = {
        'endpoint': controller_slice.controller.auth_url,
        'admin_user': controller_slice.controller.admin_user,
        'admin_password': controller_slice.controller.admin_password,
        'admin_tenant': 'admin',
        'tenant': controller_slice.slice.name,
        'tenant_description': controller_slice.slice.description,
        'roles': roles,
        'name': controller_user.user.email,
        'ansible_tag': '%s@%s' % (controller_slice.slice.name,
                                  controller_slice.controller.name),
        'max_instances': max_instances,
    }

    # One play per role plus the tenant-creation play itself.
    expected_num = len(roles) + 1
    res = run_template('sync_controller_slices.yaml', tenant_fields,
                       path='controller_slices', expected_num=expected_num)
    tenant_id = res[0]['id']

    if not controller_slice.tenant_id:
        try:
            driver = OpenStackDriver().admin_driver(
                controller=controller_slice.controller)
            # FIX: quota was updated against controller_slice.tenant_id,
            # which is empty on this branch by definition; use the tenant id
            # just returned by the template run.
            driver.shell.nova.quotas.update(
                tenant_id=tenant_id,
                instances=int(controller_slice.slice.max_slivers))
        except Exception:
            logger.log_exc('Could not update quota for %s' % controller_slice.slice.name)
            raise Exception('Could not update quota for %s' % controller_slice.slice.name)
        controller_slice.tenant_id = tenant_id
        controller_slice.backend_status = '1 - OK'
        controller_slice.save()