Example #1
def monitor_sizes_for(provider_id, print_logs=False):
    """
    Run the set of tasks related to monitoring sizes for a provider.
    While debugging, print_logs=True can be very helpful.
    """
    from service.driver import get_admin_driver

    if print_logs:
        import logging
        import sys
        consolehandler = logging.StreamHandler(sys.stdout)
        consolehandler.setLevel(logging.DEBUG)
        celery_logger.addHandler(consolehandler)

    provider = Provider.objects.get(id=provider_id)
    admin_driver = get_admin_driver(provider)
    # Non-end-dated sizes on this provider
    db_sizes = Size.objects.filter(only_current(), provider=provider)
    all_sizes = admin_driver.list_sizes()
    seen_sizes = []
    for cloud_size in all_sizes:
        core_size = convert_esh_size(cloud_size, provider.uuid)
        seen_sizes.append(core_size)

    now_time = timezone.now()
    needs_end_date = [size for size in db_sizes if size not in seen_sizes]
    for size in needs_end_date:
        celery_logger.debug("End dating inactive size: %s" % size)
        size.end_date = now_time
        size.save()

    if print_logs:
        celery_logger.removeHandler(consolehandler)
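
The "if print_logs:" setup block above recurs verbatim in every example below, and the _exit_stdout_logging helper in Examples #7 and #8 shows the teardown half factored out. A minimal sketch of the matching setup helper, assuming celery_logger is an ordinary logging.Logger; the name _init_stdout_logging is hypothetical:

import logging
import sys

def _init_stdout_logging(logger=None):
    # Hypothetical counterpart to _exit_stdout_logging (Examples #7 and #8):
    # attach a DEBUG-level stdout handler so task logs are visible while
    # debugging, and return it so the caller can detach it afterwards.
    if logger is None:
        logger = celery_logger  # module-level logger assumed by the examples
    consolehandler = logging.StreamHandler(sys.stdout)
    consolehandler.setLevel(logging.DEBUG)
    logger.addHandler(consolehandler)
    return consolehandler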
Example #2
def monitor_instances_for(provider_id, users=None,
                          print_logs=False, check_allocations=False, start_date=None, end_date=None):
    """
    Run the set of tasks related to monitoring instances for a provider.
    Optionally, provide a list of usernames to monitor.
    While debugging, print_logs=True can be very helpful.
    start_date and end_date allow you to search a 'non-standard' window of time.
    check_allocations=True additionally runs over-allocation enforcement.
    """
    provider = Provider.objects.get(id=provider_id)

    # For now, let's just ignore everything that isn't openstack.
    if 'openstack' not in provider.type.name.lower():
        return

    instance_map = _get_instance_owner_map(provider, users=users)

    if print_logs:
        import logging
        import sys
        consolehandler = logging.StreamHandler(sys.stdout)
        consolehandler.setLevel(logging.DEBUG)
        celery_logger.addHandler(consolehandler)

    # DEVNOTE: Potential slowdown running multiple functions
    # Break this out when instance-caching is enabled
    running_total = 0
    for username in sorted(instance_map.keys()):
        running_instances = instance_map[username]
        running_total += len(running_instances)
        identity = _get_identity_from_tenant_name(provider, username)
        if identity and running_instances:
            try:
                driver = get_cached_driver(identity=identity)
                core_running_instances = [
                    convert_esh_instance(
                        driver,
                        inst,
                        identity.provider.uuid,
                        identity.uuid,
                        identity.created_by) for inst in running_instances]
            except Exception:
                celery_logger.exception(
                    "Could not convert running instances for %s" %
                    username)
                continue
        else:
            # No running instances.
            core_running_instances = []
        # Using the 'known' list of running instances, cleanup the DB
        core_instances = _cleanup_missing_instances(
            identity,
            core_running_instances)
        if check_allocations:
            allocation_result = user_over_allocation_enforcement(
                provider, username,
                print_logs, start_date, end_date)
    if print_logs:
        celery_logger.removeHandler(consolehandler)
    return running_total
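
A hedged invocation sketch for the example above; the provider id and username are illustrative only and assume a matching Provider row exists:

total = monitor_instances_for(
    provider_id=1,               # hypothetical provider id
    users=['example_user'],      # limit monitoring to these tenant names
    print_logs=True,             # stream celery_logger output to stdout
    check_allocations=True)      # also run over-allocation enforcement
print("Observed %d running instances" % total)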
Example #3
def prune_machines_for(
        provider_id, print_logs=False, dry_run=False, forced_removal=False):
    """
    Look at the list of machines (as seen by the AccountProvider);
    if a machine cannot be found in the list, remove it.
    NOTE: BEFORE CALLING THIS TASK you should ensure
    that the AccountProvider can see ALL images.
    Failure to do so will result in any image unseen by the admin
    being prematurely end-dated and removed from the API/UI.
    """
    provider = Provider.objects.get(id=provider_id)
    now = timezone.now()
    if print_logs:
        import logging
        import sys
        consolehandler = logging.StreamHandler(sys.stdout)
        consolehandler.setLevel(logging.DEBUG)
        celery_logger.addHandler(consolehandler)
    celery_logger.info("Starting prune_machines for Provider %s @ %s"
                       % (provider, now))

    if provider.is_active():
        account_driver = get_account_driver(provider)
        db_machines = ProviderMachine.objects.filter(
            only_current_source(), instance_source__provider=provider)
        cloud_machines = account_driver.list_all_images()
    else:
        db_machines = ProviderMachine.objects.filter(
                source_in_range(),  # like 'only_current..' w/o active_provider
                instance_source__provider=provider)
        cloud_machines = []

    # Don't do anything if cloud_machines is empty/None (unless forced_removal)
    if not cloud_machines and not forced_removal:
        return

    # Loop 1 - End-date all machines in the DB that
    # can NOT be found in the cloud.
    mach_count = _end_date_missing_database_machines(
        db_machines, cloud_machines, now=now, dry_run=dry_run)

    # Loop 2 and 3 - Capture all (still-active) versions without machines,
    # and all applications without versions.
    # These are 'outliers' and mainly here for safety-check purposes.
    ver_count = _remove_versions_without_machines(now=now)
    app_count = _remove_applications_without_versions(now=now)

    # Loop 4 - All 'Application' DB objects require
    # >=1 Version with >=1 ProviderMachine (ACTIVE!)
    # Apps that don't meet this criteria should be end-dated.
    app_count += _update_improperly_enddated_applications(now)

    celery_logger.info(
        "prune_machines completed for Provider %s : "
        "%s Applications, %s versions and %s machines pruned."
        % (provider, app_count, ver_count, mach_count))
    if print_logs:
        celery_logger.removeHandler(consolehandler)
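
Because dry_run is threaded through to the pruning helpers, a cautious calling pattern is to preview first and commit second. A sketch, assuming a valid provider id:

prune_machines_for(provider_id=1, print_logs=True, dry_run=True)  # preview only
prune_machines_for(provider_id=1)                                 # actually prune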
Example #4
def monitor_machines_for(provider_id, print_logs=False, dry_run=False):
    """
    Run the set of tasks related to monitoring machines for a provider.
    While debugging, print_logs=True can be very helpful.
    dry_run=True previews the changes without applying them.

    NEW LOGIC:
    * Membership and Privacy are dictated at the APPLICATION level.
    * loop over all machines on the cloud
    *   * If machine is PUBLIC, ensure the APP is public.
    *   * If machine is PRIVATE, ensure the APP is private && sync the membership!
    *   * Ignore the possibility of conflicts; the prior schema should be sufficient for ensuring the above two use cases.
    """
    provider = Provider.objects.get(id=provider_id)

    # For now, let's just ignore everything that isn't Tucson.
    if 'iplant cloud - tucson' not in provider.location.lower():
        return
    if print_logs:
        import logging
        import sys
        consolehandler = logging.StreamHandler(sys.stdout)
        consolehandler.setLevel(logging.DEBUG)
        celery_logger.addHandler(consolehandler)

    # STEP 1: Get the apps
    new_public_apps, private_apps = get_public_and_private_apps(provider)

    # STEP 2: Find conflicts and report them.
    intersection = set(private_apps.keys()) & set(new_public_apps)
    if intersection:
        raise Exception(
            "These applications were listed as BOTH public && private apps. Manual conflict correction required: %s"
            % intersection)

    # STEP 3: Apply the changes at the app level.
    # Memoizing at this level saves time.
    account_drivers = {}  # Provider -> accountDriver
    provider_tenant_mapping = {}  # Provider -> [{TenantId : TenantName},...]
    image_maps = {}
    for app in new_public_apps:
        make_machines_public(app, account_drivers, dry_run=dry_run)

    for app, membership in private_apps.items():
        make_machines_private(
            app,
            membership,
            account_drivers,
            provider_tenant_mapping,
            image_maps,
            dry_run=dry_run)

    if print_logs:
        celery_logger.removeHandler(consolehandler)
    return
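
The STEP 2 conflict check is plain set arithmetic over application identifiers. A self-contained illustration with made-up app names:

new_public_apps = ['ubuntu-base', 'fedora-base']
private_apps = {'fedora-base': ['tenant-a'], 'matlab': ['tenant-b']}

intersection = set(private_apps.keys()) & set(new_public_apps)
assert intersection == {'fedora-base'}  # flagged as BOTH public and private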
Example #5
def monitor_instances_for(provider_id,
                          users=None,
                          print_logs=False,
                          start_date=None,
                          end_date=None):
    """
    Run the set of tasks related to monitoring instances for a provider.
    Optionally, provide a list of usernames to monitor.
    While debugging, print_logs=True can be very helpful.
    start_date and end_date allow you to search a 'non-standard' window of time.
    """
    provider = Provider.objects.get(id=provider_id)

    # For now, let's just ignore everything that isn't openstack.
    if 'openstack' not in provider.type.name.lower():
        return

    instance_map = _get_instance_owner_map(provider, users=users)

    if print_logs:
        import logging
        import sys
        consolehandler = logging.StreamHandler(sys.stdout)
        consolehandler.setLevel(logging.DEBUG)
        celery_logger.addHandler(consolehandler)

    # DEVNOTE: Potential slowdown running multiple functions
    # Break this out when instance-caching is enabled
    for username in sorted(instance_map.keys()):
        running_instances = instance_map[username]
        identity = _get_identity_from_tenant_name(provider, username)
        if identity and running_instances:
            try:
                driver = get_cached_driver(identity=identity)
                core_running_instances = [
                    convert_esh_instance(driver, inst, identity.provider.uuid,
                                         identity.uuid, identity.created_by)
                    for inst in running_instances
                ]
            except Exception:
                celery_logger.exception(
                    "Could not convert running instances for %s" % username)
                continue
        else:
            # No running instances.
            core_running_instances = []
        # Using the 'known' list of running instances, cleanup the DB
        core_instances = _cleanup_missing_instances(identity,
                                                    core_running_instances)
        allocation_result = user_over_allocation_enforcement(
            provider, username, print_logs, start_date, end_date)
    if print_logs:
        celery_logger.removeHandler(consolehandler)
Example #6
def prune_machines_for(provider_id,
                       print_logs=False,
                       dry_run=False,
                       forced_removal=False):
    """
    Look at the list of machines (as seen by the AccountProvider);
    if a machine cannot be found in the list, remove it.
    NOTE: BEFORE CALLING THIS TASK you should ensure that the
    AccountProvider can see ALL images. Failure to do so will result in
    any unseen image being prematurely end-dated and removed from the API/UI.
    """
    provider = Provider.objects.get(id=provider_id)

    if print_logs:
        import logging
        import sys
        consolehandler = logging.StreamHandler(sys.stdout)
        consolehandler.setLevel(logging.DEBUG)
        celery_logger.addHandler(consolehandler)

    if provider.is_active():
        account_driver = get_account_driver(provider)
        db_machines = ProviderMachine.objects.filter(
            only_current_source(), instance_source__provider=provider)
        all_projects_map = tenant_id_to_name_map(account_driver)
        cloud_machines = account_driver.list_all_images()
    else:
        db_machines = ProviderMachine.objects.filter(
            source_in_range(), instance_source__provider=provider)
        cloud_machines = []
    # Don't do anything if cloud_machines is empty/None (unless forced_removal)
    if not cloud_machines and not forced_removal:
        return

    # Loop 1 - End-date all machines in the DB that can NOT be found in the cloud.
    cloud_machine_ids = [mach.id for mach in cloud_machines]
    for machine in db_machines:
        cloud_match = [
            mach for mach in cloud_machine_ids if mach == machine.identifier
        ]
        if not cloud_match:
            remove_machine(machine, dry_run=dry_run)

    if print_logs:
        celery_logger.removeHandler(consolehandler)
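
Loop 1 above rebuilds a list for every DB machine, which is O(db_machines * cloud_machines). A set gives the same result with constant-time membership tests; this sketch assumes mach.id and machine.identifier are directly comparable (e.g. both image UUID strings):

cloud_machine_ids = {mach.id for mach in cloud_machines}
for machine in db_machines:
    if machine.identifier not in cloud_machine_ids:
        remove_machine(machine, dry_run=dry_run)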
Example #7
def _exit_stdout_logging(consolehandler):
    # Detach the stdout handler attached while print_logs was in effect.
    celery_logger.removeHandler(consolehandler)
Example #8
def _exit_stdout_logging(consolehandler):
    if settings.DEBUG:
        # In DEBUG mode, leave the stdout handler attached.
        return
    celery_logger.removeHandler(consolehandler)
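
Since the handler should be detached even if the task body raises, a try/finally wrapper is a safer calling pattern around these helpers. A sketch, reusing the hypothetical _init_stdout_logging from the earlier sketch:

consolehandler = _init_stdout_logging() if print_logs else None
try:
    pass  # ... task body ...
finally:
    if consolehandler:
        _exit_stdout_logging(consolehandler)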