def monitor_machines_for(provider_id, print_logs=False):
    """
    Run the set of tasks related to monitoring machines for a provider.
    While debugging, print_logs=True can be very helpful.
    """
    from core.models import Application, ProviderMachine, Provider
    provider = Provider.objects.get(id=provider_id)
    # For now, let's just ignore everything that isn't Tucson.
    if 'iplant cloud - tucson' not in provider.location.lower():
        return
    if print_logs:
        import logging
        import sys
        consolehandler = logging.StreamHandler(sys.stdout)
        consolehandler.setLevel(logging.DEBUG)
        logger.addHandler(consolehandler)
    testable_apps = ProviderMachine.objects.filter(
        instance_source__provider__id=provider_id).values_list(
        'application_version__application', flat=True).distinct()
    apps = Application.objects.filter(id__in=testable_apps)
    for application in apps:
        if not application.private:
            validate_public_app(application, provider)
        # TODO: Add more logic later.
    if print_logs:
        logger.removeHandler(consolehandler)
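# Usage sketch (an assumption, not part of the original module): look up the
# Tucson provider by the same location string that monitor_machines_for()
# checks, then run the monitor with DEBUG logging streamed to stdout.
# `run_tucson_machine_monitor` is a hypothetical helper name.
def run_tucson_machine_monitor():
    from core.models import Provider
    tucson = Provider.objects.get(location__icontains='iplant cloud - tucson')
    monitor_machines_for(tucson.id, print_logs=True)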
def monitor_instances_for(provider, users=None, print_logs=False):
    """
    Update instances for provider.
    """
    # For now, let's just ignore everything that isn't openstack.
    if 'openstack' not in provider.type.name.lower():
        return
    instance_map = get_instance_owner_map(provider, users=users)
    if print_logs:
        import logging
        import sys
        consolehandler = logging.StreamHandler(sys.stdout)
        consolehandler.setLevel(logging.DEBUG)
        logger.addHandler(consolehandler)
    if print_logs:
        print_table_header()
    for username in sorted(instance_map.keys()):
        instances = instance_map[username]
        monitor_instances_for_user(provider, username, instances, print_logs)
    logger.info("Monitoring completed")
    if print_logs:
        logger.removeHandler(consolehandler)
def monitor_sizes_for(provider_id, print_logs=False):
    """
    Run the set of tasks related to monitoring sizes for a provider.
    While debugging, print_logs=True can be very helpful.
    """
    from service.driver import get_admin_driver
    if print_logs:
        import logging
        import sys
        consolehandler = logging.StreamHandler(sys.stdout)
        consolehandler.setLevel(logging.DEBUG)
        logger.addHandler(consolehandler)
    provider = Provider.objects.get(id=provider_id)
    admin_driver = get_admin_driver(provider)
    # Non-end-dated sizes on this provider
    db_sizes = Size.objects.filter(only_current(), provider=provider)
    all_sizes = admin_driver.list_sizes()
    seen_sizes = []
    for cloud_size in all_sizes:
        core_size = convert_esh_size(cloud_size, provider.uuid)
        seen_sizes.append(core_size)
    now_time = timezone.now()
    needs_end_date = [size for size in db_sizes if size not in seen_sizes]
    for size in needs_end_date:
        logger.debug("End dating inactive size: %s" % size)
        size.end_date = now_time
        size.save()
    if print_logs:
        logger.removeHandler(consolehandler)
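# A dry-run sketch (an assumption, not part of the original module): report
# which Size rows monitor_sizes_for() would end-date, without saving anything.
# It reuses only helpers already referenced above (get_admin_driver,
# only_current, convert_esh_size); `preview_stale_sizes_for` is a hypothetical
# name.
def preview_stale_sizes_for(provider_id):
    from service.driver import get_admin_driver
    provider = Provider.objects.get(id=provider_id)
    admin_driver = get_admin_driver(provider)
    db_sizes = Size.objects.filter(only_current(), provider=provider)
    seen_sizes = [convert_esh_size(cloud_size, provider.uuid)
                  for cloud_size in admin_driver.list_sizes()]
    stale_sizes = [size for size in db_sizes if size not in seen_sizes]
    for size in stale_sizes:
        logger.debug("Would end-date inactive size: %s" % size)
    return stale_sizes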
def monitor_instances_for(provider_id, users=None,
                          print_logs=False, start_date=None, end_date=None):
    """
    Run the set of tasks related to monitoring instances for a provider.
    Optionally, provide a list of usernames to monitor.
    While debugging, print_logs=True can be very helpful.
    start_date and end_date allow you to search a 'non-standard' window of time.
    """
    provider = Provider.objects.get(id=provider_id)
    # For now, let's just ignore everything that isn't openstack.
    if 'openstack' not in provider.type.name.lower():
        return
    instance_map = _get_instance_owner_map(provider, users=users)
    if print_logs:
        import logging
        import sys
        consolehandler = logging.StreamHandler(sys.stdout)
        consolehandler.setLevel(logging.DEBUG)
        logger.addHandler(consolehandler)
    # DEVNOTE: Potential slowdown running multiple functions.
    # Break this out when instance-caching is enabled.
    for username in sorted(instance_map.keys()):
        running_instances = instance_map[username]
        identity = _get_identity_from_tenant_name(provider, username)
        if identity and running_instances:
            try:
                driver = get_cached_driver(identity=identity)
                core_running_instances = [
                    convert_esh_instance(
                        driver, inst,
                        identity.provider.uuid, identity.uuid,
                        identity.created_by)
                    for inst in running_instances]
            except Exception as exc:
                logger.exception(
                    "Could not convert running instances for %s" % username)
                continue
        else:
            # No running instances.
            core_running_instances = []
        # Using the 'known' list of running instances, clean up the DB.
        core_instances = _cleanup_missing_instances(
            identity, core_running_instances)
        allocation_result = user_over_allocation_enforcement(
            provider, username, print_logs, start_date, end_date)
    if print_logs:
        logger.removeHandler(consolehandler)
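# Usage sketch (an assumption, not part of the original module): run instance
# monitoring and allocation enforcement over a 'non-standard' window of time,
# as the docstring above describes. The provider id (1) and username are
# hypothetical example values.
def enforce_last_week_example():
    from datetime import timedelta
    end = timezone.now()
    start = end - timedelta(days=7)
    monitor_instances_for(
        1, users=['example_user'],
        print_logs=True, start_date=start, end_date=end)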
def monitor_machines_for(provider_id, print_logs=False, dry_run=False):
    """
    Run the set of tasks related to monitoring machines for a provider.
    While debugging, print_logs=True can be very helpful.

    NEW LOGIC:
    * Membership and Privacy are dictated at the APPLICATION level.
    * Loop over all machines on the cloud:
      * If machine is PUBLIC, ensure the APP is public.
      * If machine is PRIVATE, ensure the APP is private && sync the membership!
      * Ignore the possibility of conflicts; the prior schema should be
        sufficient for ensuring the above two use cases.
    """
    provider = Provider.objects.get(id=provider_id)
    # For now, let's just ignore everything that isn't Tucson.
    if 'iplant cloud - tucson' not in provider.location.lower():
        return
    if print_logs:
        import logging
        import sys
        consolehandler = logging.StreamHandler(sys.stdout)
        consolehandler.setLevel(logging.DEBUG)
        logger.addHandler(consolehandler)
    # STEP 1: Get the apps.
    new_public_apps, private_apps = get_public_and_private_apps(provider)
    # STEP 2: Find conflicts and report them.
    intersection = set(private_apps.keys()) & set(new_public_apps)
    if intersection:
        raise Exception(
            "These applications were listed as BOTH public && private apps. "
            "Manual conflict correction required: %s" % intersection)
    # STEP 3: Apply the changes at the app level.
    # Memoization at this high of a level will help save time.
    account_drivers = {}  # Provider -> AccountDriver
    provider_tenant_mapping = {}  # Provider -> [{TenantId: TenantName}, ...]
    image_maps = {}
    for app in new_public_apps:
        make_machines_public(app, account_drivers, dry_run=dry_run)
    for app, membership in private_apps.items():
        make_machines_private(
            app, membership, account_drivers,
            provider_tenant_mapping, image_maps, dry_run=dry_run)
    if print_logs:
        logger.removeHandler(consolehandler)
    return
def prune_machines_for(provider_id, print_logs=False,
                       dry_run=False, forced_removal=False):
    """
    Look at the list of machines (as seen by the AccountProvider).
    If a machine cannot be found in that list, remove it.

    NOTE: BEFORE CALLING THIS TASK you should ensure that the AccountProvider
    can see ALL images. Failure to do so will result in any unseen image being
    prematurely end-dated and removed from the API/UI.
    """
    provider = Provider.objects.get(id=provider_id)
    if print_logs:
        import logging
        import sys
        consolehandler = logging.StreamHandler(sys.stdout)
        consolehandler.setLevel(logging.DEBUG)
        logger.addHandler(consolehandler)
    if provider.is_active():
        account_driver = get_account_driver(provider)
        db_machines = ProviderMachine.objects.filter(
            only_current_source(), instance_source__provider=provider)
        all_projects_map = tenant_id_to_name_map(account_driver)
        cloud_machines = account_driver.list_all_images()
    else:
        db_machines = ProviderMachine.objects.filter(
            source_in_range(), instance_source__provider=provider)
        cloud_machines = []
    # Don't do anything if there are no cloud machines to compare against,
    # unless a forced removal was requested.
    if not cloud_machines and not forced_removal:
        return
    # Loop 1: End-date all machines in the DB that can NOT be found in the cloud.
    cloud_machine_ids = [mach.id for mach in cloud_machines]
    for machine in db_machines:
        cloud_match = [mach for mach in cloud_machine_ids
                       if mach == machine.identifier]
        if not cloud_match:
            remove_machine(machine, dry_run=dry_run)
    if print_logs:
        logger.removeHandler(consolehandler)
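# A chaining sketch (an assumption, not part of the original module): one way
# to run a full monitoring pass over several providers by calling the tasks
# above in sequence. `_run_monitoring_pass` and its defaults are hypothetical;
# the original project may schedule each task independently instead.
def _run_monitoring_pass(provider_ids, print_logs=False, dry_run=True):
    for provider_id in provider_ids:
        monitor_machines_for(provider_id, print_logs=print_logs, dry_run=dry_run)
        monitor_sizes_for(provider_id, print_logs=print_logs)
        monitor_instances_for(provider_id, print_logs=print_logs)
        prune_machines_for(provider_id, print_logs=print_logs, dry_run=dry_run)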