def redistribute_routers(provider_id, users=[], redistribute=False): for provider in Provider.objects.filter(id=provider_id): router_map = provider.get_router_distribution() # Print 'before' instance_map = _get_instance_owner_map(provider, users=users) if redistribute: needs_router = provider.identity_set.all().order_by( 'created_by__username') router_map = {key: 0 for key in router_map.keys()} else: needs_router = provider.missing_routers() for identity in needs_router: identity_user = identity.created_by.username if users and identity_user not in users: print "Skipping user %s" % identity_user continue instances = instance_map.get(identity_user, []) if len(instances) > 0: print "Skipping user %s - Reason: User has running instances" % identity_user continue # Select next available router for the identity selected_router = provider.select_router(router_map) # Save happens here: Identity.update_credential(identity, 'router_name', selected_router, replace=True) router_map[selected_router] += 1 provider.get_router_distribution() # Print 'after' return
def redistribute_routers(provider_id, users=[], redistribute=False): for provider in Provider.objects.filter(id=provider_id): router_map = provider.get_router_distribution() # Print 'before' instance_map = _get_instance_owner_map(provider, users=users) if redistribute: needs_router = provider.identity_set.all( ).order_by('created_by__username') router_map = {key: 0 for key in router_map.keys()} else: needs_router = provider.missing_routers() for identity in needs_router: identity_user = identity.created_by.username if users and identity_user not in users: print "Skipping user %s" % identity_user continue instances = instance_map.get(identity_user, []) if len(instances) > 0: print "Skipping user %s - Reason: User has running instances" % identity_user continue # Select next available router for the identity selected_router = provider.select_router(router_map) # Save happens here: Identity.update_credential( identity, 'router_name', selected_router, replace=True ) router_map[selected_router] += 1 provider.get_router_distribution() # Print 'after' return
def monitor_instances_for(provider_id, users=None, print_logs=False,
                          check_allocations=False, start_date=None,
                          end_date=None):
    """
    Run the set of tasks related to monitoring instances for a provider.
    Optionally, provide a list of usernames to monitor
    While debugging, print_logs=True can be very helpful.
    start_date and end_date allow you to search a 'non-standard' window of time.

    Returns the total count of running instances seen, or None when the
    provider is not openstack-backed.
    """
    provider = Provider.objects.get(id=provider_id)
    # For now, lets just ignore everything that isn't openstack.
    if 'openstack' not in provider.type.name.lower():
        return
    instance_map = _get_instance_owner_map(provider, users=users)
    if print_logs:
        # Use the shared stdout-logging helpers (as the other monitor_*
        # variants in this file do) instead of wiring a StreamHandler inline.
        console_handler = _init_stdout_logging()
    # DEVNOTE: Potential slowdown running multiple functions
    # Break this out when instance-caching is enabled
    running_total = 0
    for username in sorted(instance_map.keys()):
        running_instances = instance_map[username]
        running_total += len(running_instances)
        identity = _get_identity_from_tenant_name(provider, username)
        if identity and running_instances:
            try:
                driver = get_cached_driver(identity=identity)
                core_running_instances = [
                    convert_esh_instance(
                        driver, inst, identity.provider.uuid,
                        identity.uuid, identity.created_by)
                    for inst in running_instances]
            except Exception:
                # logger.exception records the traceback; binding the
                # exception to a name was unnecessary.
                celery_logger.exception(
                    "Could not convert running instances for %s" % username)
                continue
        else:
            # No running instances.
            core_running_instances = []
        # Using the 'known' list of running instances, cleanup the DB
        _cleanup_missing_instances(identity, core_running_instances)
        if check_allocations:
            user_over_allocation_enforcement(
                provider, username, print_logs, start_date, end_date)
    if print_logs:
        _exit_stdout_logging(console_handler)
    return running_total
def monitor_instances_for(provider_id, users=None, print_logs=False,
                          check_allocations=False, start_date=None,
                          end_date=None):
    """
    Run the set of tasks related to monitoring instances for a provider.
    Optionally, provide a list of usernames to monitor
    While debugging, print_logs=True can be very helpful.
    start_date and end_date allow you to search a 'non-standard' window of time.

    Returns the total count of running instances seen, or None when the
    provider is not openstack-backed.
    """
    provider = Provider.objects.get(id=provider_id)
    # For now, lets just ignore everything that isn't openstack.
    if 'openstack' not in provider.type.name.lower():
        return
    instance_map = _get_instance_owner_map(provider, users=users)
    if print_logs:
        console_handler = _init_stdout_logging()
    # DEVNOTE: Potential slowdown running multiple functions
    # Break this out when instance-caching is enabled
    running_total = 0
    if not settings.ENFORCING:
        celery_logger.debug('Settings dictate allocations are NOT enforced')
    for username in sorted(instance_map.keys()):
        running_instances = instance_map[username]
        running_total += len(running_instances)
        identity = _get_identity_from_tenant_name(provider, username)
        if identity and running_instances:
            try:
                driver = get_cached_driver(identity=identity)
                core_running_instances = [
                    convert_esh_instance(driver, inst,
                                         identity.provider.uuid,
                                         identity.uuid,
                                         identity.created_by)
                    for inst in running_instances
                ]
            except Exception:
                # The exception value was never used; logger.exception
                # already captures the traceback.
                celery_logger.exception(
                    "Could not convert running instances for %s" % username)
                continue
        else:
            # No running instances.
            core_running_instances = []
        # Using the 'known' list of running instances, cleanup the DB.
        # (Return values of cleanup/enforcement are intentionally ignored.)
        _cleanup_missing_instances(identity, core_running_instances)
        if check_allocations:
            user_over_allocation_enforcement(
                provider, username, print_logs, start_date, end_date)
    if print_logs:
        _exit_stdout_logging(console_handler)
    return running_total
def monitor_instances_for(provider_id, users=None, print_logs=False,
                          start_date=None, end_date=None):
    """
    Run the set of tasks related to monitoring instances for a provider.
    Optionally, provide a list of usernames to monitor
    While debugging, print_logs=True can be very helpful.
    start_date and end_date allow you to search a 'non-standard' window of time.
    """
    provider = Provider.objects.get(id=provider_id)
    # For now, lets just ignore everything that isn't openstack.
    if 'openstack' not in provider.type.name.lower():
        return
    instance_map = _get_instance_owner_map(provider, users=users)
    consolehandler = None
    if print_logs:
        # Mirror log output to stdout while debugging.
        import logging
        import sys
        consolehandler = logging.StreamHandler(sys.stdout)
        consolehandler.setLevel(logging.DEBUG)
        logger.addHandler(consolehandler)
    try:
        # DEVNOTE: Potential slowdown running multiple functions
        # Break this out when instance-caching is enabled
        for username in sorted(instance_map.keys()):
            running_instances = instance_map[username]
            identity = _get_identity_from_tenant_name(provider, username)
            if identity and running_instances:
                try:
                    driver = get_cached_driver(identity=identity)
                    core_running_instances = [
                        convert_esh_instance(driver, inst,
                                             identity.provider.uuid,
                                             identity.uuid,
                                             identity.created_by)
                        for inst in running_instances
                    ]
                except Exception:
                    # logger.exception captures the traceback; the bound
                    # 'exc' name was unused.
                    logger.exception(
                        "Could not convert running instances for %s" % username)
                    continue
            else:
                # No running instances.
                core_running_instances = []
            # Using the 'known' list of running instances, cleanup the DB
            _cleanup_missing_instances(identity, core_running_instances)
            user_over_allocation_enforcement(
                provider, username, print_logs, start_date, end_date)
    finally:
        # Always detach the debug handler, even if the loop raises --
        # otherwise a failure would leave the logger permanently noisy.
        if consolehandler is not None:
            logger.removeHandler(consolehandler)
def monitor_instances_for(provider_id, users=None, print_logs=False,
                          start_date=None, end_date=None):
    """
    Run the set of tasks related to monitoring instances for a provider.
    Optionally, provide a list of usernames to monitor
    While debugging, print_logs=True can be very helpful.
    start_date and end_date allow you to search a 'non-standard' window of time.
    """
    provider = Provider.objects.get(id=provider_id)
    # For now, lets just ignore everything that isn't openstack.
    if 'openstack' not in provider.type.name.lower():
        return
    instance_map = _get_instance_owner_map(provider, users=users)
    if print_logs:
        console_handler = _init_stdout_logging()
    seen_instances = []
    # DEVNOTE: Potential slowdown running multiple functions
    # Break this out when instance-caching is enabled
    if not settings.ENFORCING:
        celery_logger.debug('Settings dictate allocations are NOT enforced')
    for tenant_name in sorted(instance_map.keys()):
        running_instances = instance_map[tenant_name]
        identity = _get_identity_from_tenant_name(provider, tenant_name)
        if identity and running_instances:
            try:
                driver = get_cached_driver(identity=identity)
                core_running_instances = [
                    convert_esh_instance(
                        driver, inst, identity.provider.uuid,
                        identity.uuid, identity.created_by)
                    for inst in running_instances]
                seen_instances.extend(core_running_instances)
            except Exception:
                # logger.exception records the traceback; binding the
                # exception to an unused name was unnecessary.
                celery_logger.exception(
                    "Could not convert running instances for %s" % tenant_name)
                continue
        else:
            # No running instances.
            core_running_instances = []
        # Using the 'known' list of running instances, cleanup the DB
        # (the cleanup result is intentionally ignored).
        _cleanup_missing_instances(identity, core_running_instances)
    if print_logs:
        _exit_stdout_logging(console_handler)
    # return seen_instances NOTE: this has been commented out to avoid PicklingError!
    # TODO: Uncomment the above, Determine what _we can return_ and return that instead....
    return
def monitor_instances_for(
    provider_id, users=None, print_logs=False, start_date=None, end_date=None
):
    """
    Run the set of tasks related to monitoring instances for a provider.

    Optionally, provide a list of usernames to monitor
    While debugging, print_logs=True can be very helpful.
    start_date and end_date allow you to search a 'non-standard' window of time.
    """
    provider = Provider.objects.get(id=provider_id)
    # Only openstack-backed providers are monitored; bail out on anything else.
    if 'openstack' not in provider.type.name.lower():
        return
    instance_map = _get_instance_owner_map(provider, users=users)
    console_handler = _init_stdout_logging() if print_logs else None
    seen_instances = []
    # DEVNOTE: Potential slowdown running multiple functions
    # Break this out when instance-caching is enabled
    if not settings.ENFORCING:
        celery_logger.debug('Settings dictate allocations are NOT enforced')
    for tenant_name in sorted(instance_map):
        running_instances = instance_map[tenant_name]
        identity = _get_identity_from_tenant_name(provider, tenant_name)
        # Default to "nothing running"; only the happy path below replaces it.
        core_running_instances = []
        if identity and running_instances:
            try:
                driver = get_cached_driver(identity=identity)
                core_running_instances = [
                    convert_esh_instance(
                        driver,
                        esh_instance,
                        identity.provider.uuid,
                        identity.uuid,
                        identity.created_by,
                    )
                    for esh_instance in running_instances
                ]
                seen_instances.extend(core_running_instances)
            except Exception:
                celery_logger.exception(
                    "Could not convert running instances for %s" % tenant_name
                )
                continue
        # Using the 'known' list of running instances, cleanup the DB
        _cleanup_missing_instances(identity, core_running_instances)
    if console_handler is not None:
        _exit_stdout_logging(console_handler)
    # return seen_instances NOTE: this has been commented out to avoid PicklingError!
    # TODO: Uncomment the above, Determine what _we can return_ and return that instead....
    return