Exemplo n.º 1
0
def set_provider_quota(identity_uuid, limit_dict=None):
    """
    Push the quota stored on an identity's membership out to its
    (OpenStack) provider, clamped to the provider's hard limits.

    Returns None early when the identity has no credentials; otherwise
    returns True.
    """
    identity = Identity.objects.get(uuid=identity_uuid)
    if not identity.credential_set.all():
        # Quota cannot be pushed without credentials on the identity.
        return
    if not limit_dict:
        limit_dict = _get_hard_limits(identity.provider)
    if identity.provider.get_type_name().lower() != 'openstack':
        return True
    driver = get_cached_driver(identity=identity)
    username = identity.created_by.username
    # NOTE(review): '_connection.key' looks like a credential key rather
    # than a keystone user id (other variants call _get_user_id()) — confirm.
    user_id = driver._connection.key
    tenant_id = driver._connection._get_tenant_id()
    membership = IdentityMembership.objects.get(
        identity__uuid=identity_uuid,
        member__name=username)
    user_quota = membership.quota
    if user_quota:
        # Never exceed the hard-set limits for this provider.
        user_quota.cpu = min(user_quota.cpu, limit_dict['cpu'])
        user_quota.memory = min(user_quota.memory, limit_dict['ram'])
        values = {'cores': user_quota.cpu,
                  'ram': user_quota.memory * 1024}
        logger.info("Updating quota for %s to %s" % (username, values))
        account = AccountDriver(identity.provider)
        account.admin_driver._connection.ex_update_quota_for_user(
            tenant_id, user_id, values)
    return True
Exemplo n.º 2
0
def _resolve_history_conflict(identity,
                              core_running_instance,
                              bad_history,
                              reset_time=None):
    """
    End-date each conflicting InstanceStatusHistory in `bad_history` and
    create one fresh history entry reflecting the instance's current
    cloud status and size.

    NOTE 1: This is a 'band-aid' fix until we are 100% that Transaction will
            not create conflicting un-end-dated objects.

    NOTE 2: It is EXPECTED that this instance has the 'esh' attribute
            Failure to add the 'esh' attribute will generate a ValueError!
    """
    # FIX: supply a default so a *missing* 'esh' attribute raises the
    # documented ValueError instead of an AttributeError.
    if not getattr(core_running_instance, 'esh', None):
        raise ValueError("Esh is missing from %s" % core_running_instance)
    esh_instance = core_running_instance.esh

    # Check for temporary status and fetch that
    tmp_status = esh_instance.extra.get('metadata', {}).get("tmp_status")
    new_status = tmp_status or esh_instance.extra['status']

    esh_driver = get_cached_driver(identity=identity)
    new_size = _esh_instance_size_to_core(esh_driver, esh_instance,
                                          identity.provider.uuid)
    if not reset_time:
        reset_time = timezone.now()
    for history in bad_history:
        history.end_date = reset_time
        history.save()
    new_history = InstanceStatusHistory.create_history(new_status,
                                                       core_running_instance,
                                                       new_size, reset_time)
    return new_history
Exemplo n.º 3
0
def create_bootable_volume(
        user,
        provider_uuid,
        identity_uuid,
        name,
        size_alias,
        new_source_alias,
        source_hint=None,
        **kwargs):
    """
    Boot an instance from a volume-backed source.

    **kwargs passed as data to boot_volume_instance

    Raises Exception when the identity, driver, or size cannot be resolved.
    """
    # FIX: Identity.objects.get raises DoesNotExist rather than returning
    # None, so the old 'if not identity' check was dead code.
    try:
        identity = Identity.objects.get(uuid=identity_uuid)
    except Identity.DoesNotExist:
        raise Exception("Identity UUID %s does not exist." % identity_uuid)

    driver = get_cached_driver(identity=identity)
    if not driver:
        raise Exception(
            "Driver could not be initialized. Invalid Credentials?")

    size = driver.get_size(size_alias)
    if not size:
        raise Exception(
            "Size %s could not be located with this driver" % size_alias)

    # Return source or raises an Exception
    source = _retrieve_source(driver, new_source_alias, source_hint)

    core_instance = boot_volume_instance(driver, identity,
                                         source, size, name, **kwargs)

    return core_instance
Exemplo n.º 4
0
def allocation_source_overage_enforcement_for(allocation_source, user,
                                              identity):
    """
    Apply the provider's over-allocation action to each of `user`'s
    instances on `identity` that count against `allocation_source`.

    Returns the list of resulting core instances; returns [] when the
    provider has no action configured or enforcement is disabled.
    """
    logger.debug(
        "allocation_source_overage_enforcement_for - allocation_source: %s, user: %s, identity: %s",
        allocation_source, user, identity)
    provider = identity.provider
    action = provider.over_allocation_action
    logger.debug(
        "allocation_source_overage_enforcement_for - provider.over_allocation_action: %s",
        provider.over_allocation_action)
    if not action:
        logger.debug("No 'over_allocation_action' provided for %s", provider)
        return []  # Over_allocation was not attempted
    if not settings.ENFORCING:
        logger.info("Settings dictate that ENFORCING = False. Returning..")
        return []
    try:
        driver = get_cached_driver(identity=identity)
        cloud_instances = driver.list_instances()
    except LibcloudInvalidCredsError:
        raise Exception("User %s has invalid credentials on Identity %s" %
                        (user, identity))
    source_instances = filter_allocation_source_instances(
        allocation_source, user, cloud_instances)
    # TODO: Parallelize this operation so you don't wait for larger instances
    # to finish 'wait_for' task below..
    return [
        execute_provider_action(user, driver, identity, cloud_instance,
                                action)
        for cloud_instance in source_instances
    ]
Exemplo n.º 5
0
def _get_instance_owner_map(provider, users=None):
    """
    Map each instance owner to that owner's running instances on `provider`.

    All keys == All identities
    Values = List of identities / username
    NOTE: This is KEYSTONE && NOVA specific. the 'instance owner' here is the
          username // ex_tenant_name
    """
    from service.driver import get_account_driver

    # NOTE(review): admin_driver is unused below (its only use is commented
    # out in the original) — confirm before removing.
    admin_driver = get_cached_driver(provider=provider)
    accounts = get_account_driver(provider=provider, raise_exception=True)
    all_identities = _select_identities(provider, users)
    acct_providers = AccountProvider.objects.filter(provider=provider)
    if acct_providers:
        # Prefer listing through the account-provider's identity.
        account_identity = acct_providers[0].identity
        provider = None
    else:
        account_identity = None

    instances = get_cached_instances(provider=provider,
                                     identity=account_identity,
                                     force=True)
    tenants = accounts.list_projects()
    # Convert instance.owner from tenant-id to tenant-name all at once
    instances = _convert_tenant_id_to_names(instances, tenants)
    # Make a mapping of owner-to-instance
    owner_map = _make_instance_owner_map(instances, users=users)
    logger.info("Instance owner map created")
    identity_map = _include_all_idents(all_identities, owner_map)
    logger.info("Identity map created")
    return identity_map
Exemplo n.º 6
0
def _set_compute_quota(user_quota, identity):
    """
    Push `user_quota`'s compute limits to the OpenStack cloud backing
    `identity`.

    Tries a per-user quota update first and falls back to a tenant-level
    quota update on any failure. Returns the cloud's response.
    """
    # Use THESE values...
    compute_values = {
        'cores': user_quota.cpu,
        'ram': user_quota.memory*1024,  # NOTE: Value is stored in GB, Openstack (Liberty) expects MB
        'floating_ips': user_quota.floating_ip_count,
        'fixed_ips': user_quota.port_count,
        'instances': user_quota.instance_count,
    }
    creds = identity.get_all_credentials()
    use_tenant_id = False
    # Legacy (auth v2) clouds: drop the 'instances' key and pass the tenant
    # id along with the user quota update.
    if creds.get('ex_force_auth_version','2.0_password') == "2.0_password":
        compute_values.pop('instances')
        use_tenant_id = True

    username = identity.created_by.username
    logger.info("Updating quota for %s to %s" % (username, compute_values))
    driver = get_cached_driver(identity=identity)
    # NOTE(review): 'username' is re-bound to the driver's credential key —
    # presumably the same value as above; confirm.
    username = driver._connection.key
    tenant_id = driver._connection._get_tenant_id()
    tenant_name = identity.project_name()  # NOTE(review): unused — confirm before removing.
    ad = get_account_driver(identity.provider)
    ks_user = ad.get_user(username)
    admin_driver = ad.admin_driver
    #FIXME: Remove 'use_tenant_id' when legacy clouds are no-longer in use.
    try:
        result = admin_driver._connection.ex_update_quota_for_user(
            tenant_id, ks_user.id, compute_values, use_tenant_id=use_tenant_id)
    except Exception:
        logger.exception("Could not set a user-quota, trying to set tenant-quota")
        result = admin_driver._connection.ex_update_quota(tenant_id, compute_values, use_tenant_id=use_tenant_id)
    logger.info("Updated quota for %s to %s" % (username, result))
    return result
Exemplo n.º 7
0
def _os_update_owner(provider_machine, tenant_name):
    """
    Rewrite a machine's image metadata so 'owner' / 'application_owner'
    point at `tenant_name` (resolved to a tenant id) on an active
    OpenStack provider.
    """
    from core.models import Provider
    from service.driver import get_admin_driver
    from service.cache import get_cached_machines, get_cached_driver
    provider = provider_machine.provider
    if provider not in Provider.get_active(type_name='openstack'):
        raise Exception("An active openstack provider is required to"
                        " update image owner")
    esh_driver = get_cached_driver(provider)
    if not esh_driver:
        raise Exception("The account driver of Provider %s is required to"
                        " update image metadata" % provider)
    machines = get_cached_machines(provider, force=True)
    # Pick the first machine whose alias matches this provider-machine.
    esh_machine = next(
        (mach for mach in machines
         if mach.alias == provider_machine.identifier),
        None)
    if esh_machine is None:
        raise Exception("Machine with ID  %s not found" %
                        provider_machine.identifier)
    tenant_id = _tenant_name_to_id(provider_machine.provider, tenant_name)
    update_machine_metadata(esh_driver, esh_machine, {
        "owner": tenant_id,
        "application_owner": tenant_name
    })
Exemplo n.º 8
0
def allocation_source_overage_enforcement_for(allocation_source, user, identity):
    """
    Run the provider's over-allocation action against each of `user`'s
    instances on `identity` counted under `allocation_source`.

    Returns the resulting core instances ([] when enforcement is skipped).
    """
    logger.debug("allocation_source_overage_enforcement_for - allocation_source: %s, user: %s, identity: %s",
                 allocation_source, user, identity)
    provider = identity.provider
    enforcement_action = provider.over_allocation_action
    logger.debug("allocation_source_overage_enforcement_for - provider.over_allocation_action: %s",
                 provider.over_allocation_action)
    if not enforcement_action:
        logger.debug("No 'over_allocation_action' provided for %s", provider)
        return []  # Over_allocation was not attempted
    if not settings.ENFORCING:
        logger.info("Settings dictate that ENFORCING = False. Returning..")
        return []
    try:
        driver = get_cached_driver(identity=identity)
        cloud_instances = driver.list_instances()
    except LibcloudInvalidCredsError:
        raise Exception("User %s has invalid credentials on Identity %s" % (user, identity))
    matching = filter_allocation_source_instances(allocation_source, user, cloud_instances)
    # TODO: Parallelize this operation so you don't wait for larger instances
    # to finish 'wait_for' task below..
    enforced = []
    for cloud_instance in matching:
        enforced.append(
            execute_provider_action(user, driver, identity, cloud_instance,
                                    enforcement_action))
    return enforced
def _resolve_history_conflict(
    identity, core_running_instance, bad_history, reset_time=None
):
    """
    End-date each conflicting InstanceStatusHistory in `bad_history` and
    create one fresh history entry reflecting the instance's current
    cloud status and size.

    NOTE 1: This is a 'band-aid' fix until we are 100% that Transaction will
            not create conflicting un-end-dated objects.

    NOTE 2: It is EXPECTED that this instance has the 'esh' attribute
            Failure to add the 'esh' attribute will generate a ValueError!
    """
    # FIX: supply a default so a *missing* 'esh' attribute raises the
    # documented ValueError instead of an AttributeError.
    if not getattr(core_running_instance, 'esh', None):
        raise ValueError("Esh is missing from %s" % core_running_instance)
    esh_instance = core_running_instance.esh

    # Check for temporary status and fetch that
    tmp_status = esh_instance.extra.get('metadata', {}).get("tmp_status")
    new_status = tmp_status or esh_instance.extra['status']

    esh_driver = get_cached_driver(identity=identity)
    new_size = _esh_instance_size_to_core(
        esh_driver, esh_instance, identity.provider.uuid
    )
    if not reset_time:
        reset_time = timezone.now()
    for history in bad_history:
        history.end_date = reset_time
        history.save()
    new_history = InstanceStatusHistory.create_history(
        new_status, core_running_instance, new_size, reset_time
    )
    return new_history
Exemplo n.º 10
0
def provider_over_allocation_enforcement(identity, user):
    """
    Run the provider's 'over_allocation_action' against each active
    instance owned by `identity`.

    Returns False when the provider defines no action; otherwise returns
    None after processing every instance.
    """
    provider = identity.provider
    action = provider.over_allocation_action
    if not action:
        logger.debug("No 'over_allocation_action' provided for %s" % provider)
        return False
    driver = get_cached_driver(identity=identity)
    esh_instances = driver.list_instances()
    # TODO: Parallelize this operation so you don't wait for larger instances
    # to finish 'wait_for' task below..
    for instance in esh_instances:
        try:
            if driver._is_active_instance(instance):
                # Suspend active instances, update the task in the DB
                # NOTE: identity.created_by COULD BE the Admin User, indicating that this action/InstanceHistory was
                #       executed by the administrator.. Future Release Idea.
                _execute_provider_action(identity, identity.created_by,
                                         instance, action.name)
                # NOTE: Intentionally added to allow time for
                #      the Cloud to begin 'suspend' operation
                #      before querying for the instance again.
                # TODO: Instead: Add "wait_for" change from active to any terminal, non-active state?
                wait_time = random.uniform(2, 6)
                time.sleep(wait_time)
                updated_esh = driver.get_instance(instance.id)
                convert_esh_instance(driver, updated_esh,
                                     identity.provider.uuid, identity.uuid,
                                     user)
        except Exception as e:
            # FIX: 'except Exception, e' is Python-2-only syntax and
            # 'e.message' was removed in Python 3 — use str(e).
            # Raise ANY exception that doesn't say
            # 'This instance is already in the requested VM state'
            # NOTE: This is OpenStack specific
            if 'in vm_state' not in str(e):
                raise
Exemplo n.º 11
0
def _execute_provider_action(identity, user, instance, action_name):
    """
    Execute the named over-allocation action on `instance`.

    Known actions: Suspend, Stop, Shelve, Shelve Offload, Terminate.
    Raises Exception for any unknown, non-empty action name.
    """
    driver = get_cached_driver(identity=identity)
    try:
        if not action_name:
            logger.debug("No 'action_name' provided")
            return
        elif action_name == 'Suspend':
            suspend_instance(driver, instance, identity.provider.uuid,
                             identity.uuid, user)
        elif action_name == 'Stop':
            stop_instance(driver, instance, identity.provider.uuid,
                          identity.uuid, user)
        elif action_name == 'Shelve':
            shelve_instance(driver, instance, identity.provider.uuid,
                            identity.uuid, user)
        elif action_name == 'Shelve Offload':
            offload_instance(driver, instance, identity.provider.uuid,
                             identity.uuid, user)
        elif action_name == 'Terminate':
            destroy_instance(identity.uuid, instance)
        else:
            # FIX: referenced undefined name 'action'; use 'action_name'.
            raise Exception("Encountered Unknown Action Named %s" % action_name)
    except ObjectDoesNotExist:
        # This may be unreachable when null,blank = True
        # FIX: referenced undefined name 'provider'; use identity.provider.
        logger.debug("Provider %s - 'Do Nothing' for Over Allocation" %
                     identity.provider)
        return
Exemplo n.º 12
0
def launch_instance(user, provider_uuid, identity_uuid,
                    size_alias, source_alias, deploy=True,
                    **launch_kwargs):
    """
    USE THIS TO LAUNCH YOUR INSTANCE FROM THE REPL!
    Initialization point --> launch_*_instance --> ..
    Required arguments will launch the instance, extras will do
    provider-specific modifications.

    1. Test for available Size (on specific driver!)
    2. Test user has Quota/Allocation (on our DB)
    3. Test user is launching appropriate size (Not below Thresholds)
    4. Perform an 'Instance launch' depending on Boot Source
    5. Return CORE Instance with new 'esh' objects attached.
    """
    request_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    status_logger.debug(
        "%s,%s,%s,%s,%s,%s" % (request_time, user, "No Instance",
                               source_alias, size_alias, "Request Received"))
    identity = CoreIdentity.objects.get(uuid=identity_uuid)
    esh_driver = get_cached_driver(identity=identity)

    # Raises when the requested size is unavailable on this driver.
    size = check_size(esh_driver, size_alias, provider_uuid)

    # Raises when the boot source (volume/machine) is unavailable.
    boot_source = get_boot_source(user.username, identity_uuid, source_alias)

    # Surface any remaining validation errors before launching.
    _pre_launch_validation(
        user.username, esh_driver, identity_uuid, boot_source, size)

    return _select_and_launch_source(
        user, identity_uuid, esh_driver, boot_source, size,
        deploy=deploy, **launch_kwargs)
Exemplo n.º 13
0
def set_provider_quota(identity_id):
    """
    Push the membership quota for `identity_id` to its (OpenStack)
    provider. Returns None early when no credentials exist; otherwise
    returns True.
    """
    identity = Identity.objects.get(id=identity_id)
    if not identity.credential_set.all():
        # Quota cannot be updated without credentials on the identity.
        return
    if identity.provider.get_type_name().lower() != 'openstack':
        return True
    driver = get_cached_driver(identity=identity)
    username = identity.created_by.username
    user_id = driver._connection._get_user_id()
    tenant_id = driver._connection._get_tenant_id()
    membership = IdentityMembership.objects.get(identity__id=identity_id,
                                                member__name=username)
    quota = membership.quota
    if quota:
        quota_values = {'cores': quota.cpu,
                        'ram': quota.memory * 1024}
        logger.info("Updating quota for %s to %s" % (username, quota_values))
        account = AccountDriver(identity.provider)
        account.admin_driver._connection.ex_update_quota_for_user(
            tenant_id, user_id, quota_values)
    return True
def create_volume_snapshot(identity_uuid, volume_id, name, description):
    """
    Create a new snapshot of `volume_id` named `name`.

    Retries the (celery) task on SoftTimeLimitExceeded; re-raises when
    the Identity does not exist.
    """
    try:
        identity = Identity.objects.get(uuid=identity_uuid)
        driver = get_cached_driver(identity=identity)
        volume = driver._connection.ex_get_volume(volume_id)
        if not volume:
            raise Exception("No volume found for id=%s." % volume_id)
        new_snapshot = driver._connection.ex_create_snapshot(
            volume, name, description)
        if not new_snapshot:
            raise Exception("The snapshot could not be created.")
    except SoftTimeLimitExceeded as e:
        logger.info("Task too long to complete. Task will be retried")
        create_volume_snapshot.retry(exc=e)
    except Identity.DoesNotExist:
        logger.info("An Identity for uuid=%s does not exist.", identity_uuid)
        raise
Exemplo n.º 15
0
def create_bootable_volume(
        user,
        provider_uuid,
        identity_uuid,
        name,
        size_alias,
        new_source_alias,
        source_hint=None,
        **kwargs):
    """
    Boot an instance from a volume-backed source.

    **kwargs passed as data to boot_volume_instance

    Raises Exception when the identity, driver, or size cannot be resolved.
    """
    # FIX: Identity.objects.get raises DoesNotExist rather than returning
    # None, so the old 'if not identity' check was dead code.
    try:
        identity = Identity.objects.get(uuid=identity_uuid)
    except Identity.DoesNotExist:
        raise Exception("Identity UUID %s does not exist." % identity_uuid)

    driver = get_cached_driver(identity=identity)
    if not driver:
        raise Exception(
            "Driver could not be initialized. Invalid Credentials?")

    size = driver.get_size(size_alias)
    if not size:
        raise Exception(
            "Size %s could not be located with this driver" % size_alias)

    # Return source or raises an Exception
    source = _retrieve_source(driver, new_source_alias, source_hint)

    core_instance = boot_volume_instance(driver, identity,
                                         source, size, name, **kwargs)

    return core_instance
Exemplo n.º 16
0
def set_provider_quota(identity_uuid, limit_dict=None):
    """
    Push the membership quota for `identity_uuid` to its (OpenStack)
    provider, clamped to the provider's hard limits.

    Returns None early when no credentials exist; otherwise returns True.
    """
    identity = Identity.objects.get(uuid=identity_uuid)
    if not identity.credential_set.all():
        # Quota cannot be updated without credentials on the identity.
        return
    if not limit_dict:
        limit_dict = _get_hard_limits(identity.provider)
    if identity.provider.get_type_name().lower() != 'openstack':
        return True
    driver = get_cached_driver(identity=identity)
    username = identity.created_by.username
    user_id = driver._connection._get_user_id()
    tenant_id = driver._connection._get_tenant_id()
    membership = IdentityMembership.objects.get(
        identity__uuid=identity_uuid, member__name=username)
    quota = membership.quota
    if quota:
        # Clamp to the provider-wide hard limits.
        quota.cpu = min(quota.cpu, limit_dict['cpu'])
        quota.memory = min(quota.memory, limit_dict['ram'])
        quota_values = {'cores': quota.cpu, 'ram': quota.memory * 1024}
        logger.info("Updating quota for %s to %s" % (username, quota_values))
        account = AccountDriver(identity.provider)
        account.admin_driver._connection.ex_update_quota_for_user(
            tenant_id, user_id, quota_values)
    return True
Exemplo n.º 17
0
def create_volume_from_snapshot(identity_uuid, snapshot_id, size_id, name,
                                description, metadata):
    """
    Create a new volume for the snapshot

    NOTE: The size must be at least the same size as the original volume.

    Retries the (celery) task on SoftTimeLimitExceeded; re-raises when
    the Identity does not exist.
    """
    try:
        identity = Identity.objects.get(uuid=identity_uuid)
        driver = get_cached_driver(identity=identity)
        snapshot = driver._connection.ex_get_snapshot(snapshot_id)
        size = driver._connection.ex_get_size(size_id)

        if not snapshot:
            raise Exception("No snapshot found for id=%s." % snapshot_id)

        if not size:
            raise Exception("No size found for id=%s." % size_id)

        # NOTE(review): 'size' is only validated above; the new volume is
        # created with 'snapshot.size' — confirm whether 'size_id' should
        # drive the new volume's size instead.
        success, esh_volume = driver._connection.create_volume(
            snapshot.size, name, description=description, metadata=metadata,
            snapshot=snapshot)

        if not success:
            raise Exception("Could not create volume from snapshot")

        # Save the new volume to the database
        convert_esh_volume(
            esh_volume, identity.provider.uuid, identity_uuid,
            identity.created_by)
    except SoftTimeLimitExceeded as e:
        # Celery soft time limit hit: retry the task rather than fail.
        create_volume_from_snapshot.retry(exc=e)
    except Identity.DoesNotExist:
        logger.info("An Identity for uuid=%s does not exist.", identity_uuid)
        raise
Exemplo n.º 18
0
def provider_over_allocation_enforcement(identity, user):
    """
    Run the provider's 'over_allocation_action' against each active
    instance owned by `identity`.

    Returns False when the provider defines no action; otherwise returns
    None after processing every instance.
    """
    provider = identity.provider
    action = provider.over_allocation_action
    if not action:
        logger.debug("No 'over_allocation_action' provided for %s" % provider)
        return False
    driver = get_cached_driver(identity=identity)
    esh_instances = driver.list_instances()
    # TODO: Parallelize this operation so you don't wait for larger instances
    # to finish 'wait_for' task below..
    for instance in esh_instances:
        try:
            if driver._is_active_instance(instance):
                # Suspend active instances, update the task in the DB
                # NOTE: identity.created_by COULD BE the Admin User, indicating that this action/InstanceHistory was
                #       executed by the administrator.. Future Release Idea.
                _execute_provider_action(identity, identity.created_by, instance, action.name)
                # NOTE: Intentionally added to allow time for
                #      the Cloud to begin 'suspend' operation
                #      before querying for the instance again.
                # TODO: Instead: Add "wait_for" change from active to any terminal, non-active state?
                wait_time = random.uniform(2, 6)
                time.sleep(wait_time)
                updated_esh = driver.get_instance(instance.id)
                convert_esh_instance(
                    driver, updated_esh,
                    identity.provider.uuid,
                    identity.uuid,
                    user)
        except Exception as e:
            # FIX: 'except Exception, e' is Python-2-only syntax and
            # 'e.message' was removed in Python 3 — use str(e).
            # Raise ANY exception that doesn't say
            # 'This instance is already in the requested VM state'
            # NOTE: This is OpenStack specific
            if 'in vm_state' not in str(e):
                raise
Exemplo n.º 19
0
def create_volume_from_image(identity_uuid, image_id, size_id, name,
                             description, metadata):
    """
    Create a new volume seeded from an image, then record it in the DB.

    Retries the (celery) task on SoftTimeLimitExceeded; re-raises when
    the Identity does not exist.
    """
    try:
        identity = Identity.objects.get(uuid=identity_uuid)
        owner = identity.created_by
        driver = get_cached_driver(identity=identity)
        image = driver._connection.ex_get_image(image_id)
        size = driver._connection.ex_get_size(size_id)
        if not image:
            raise Exception("No image found for id=%s." % image_id)
        if not size:
            raise Exception("No size found for id=%s." % size_id)
        success, esh_volume = driver._connection.create_volume(
            size.id, name, description=description, metadata=metadata,
            image=image)
        if not success:
            raise Exception("Could not create volume from image")
        # Save the new volume to the database
        convert_esh_volume(
            esh_volume, identity.provider.uuid, identity_uuid, owner)
    except SoftTimeLimitExceeded as e:
        create_volume_from_image.retry(exc=e)
    except Identity.DoesNotExist:
        logger.info("An Identity for uuid=%s does not exist.", identity_uuid)
        raise
Exemplo n.º 20
0
def _get_instance_owner_map(provider, users=None):
    """
    Return a dict mapping each instance owner (username // ex_tenant_name)
    to that owner's instances on `provider`.
    NOTE: This is KEYSTONE && NOVA specific.
    """
    from service.driver import get_account_driver

    # NOTE(review): admin_driver is unused below (its only use is commented
    # out in the original) — confirm before removing.
    admin_driver = get_cached_driver(provider=provider)
    accounts = get_account_driver(provider=provider)
    all_identities = _select_identities(provider, users)
    account_providers = AccountProvider.objects.filter(provider=provider)
    if account_providers:
        # List instances through the account-provider's identity instead
        # of provider-wide.
        account_identity = account_providers[0].identity
        provider = None
    else:
        account_identity = None

    instances = get_cached_instances(
        provider=provider, identity=account_identity, force=True)
    tenants = accounts.list_projects()
    # Convert instance.owner from tenant-id to tenant-name all at once
    instances = _convert_tenant_id_to_names(instances, tenants)
    # Make a mapping of owner-to-instance
    owner_map = _make_instance_owner_map(instances, users=users)
    logger.info("Instance owner map created")
    identity_map = _include_all_idents(all_identities, owner_map)
    logger.info("Identity map created")
    return identity_map
Exemplo n.º 21
0
def _execute_provider_action(identity, user, instance, action_name):
    """
    Execute the named over-allocation action on `instance`, with legacy
    workarounds for IP reclamation and /home-mounted ephemeral storage.

    Known actions: Suspend, Stop, Shelve, Shelve Offload, Terminate.
    Raises Exception for any unknown, non-empty action name.
    """
    driver = get_cached_driver(identity=identity)

    # NOTE: This check is a HACK! It will be removed when IP management is
    # enabled in an upcoming version. -SG
    reclaim_ip = identity.provider.location != 'iPlant Cloud - Tucson'
    # ENDNOTE

    # NOTE: This metadata check is a HACK! Remove it once all instances
    # carrying this metadata key are gone.
    instance_has_home_mount = instance.extra['metadata'].get('atmosphere_ephemeral_home_mount', 'false').lower()
    if instance_has_home_mount == 'true' and action_name == 'Shelve':
        logger.info("Instance %s will be suspended instead of shelved, because the ephemeral storage is in /home" % instance.id)
        action_name = 'Suspend'

    logger.info("User %s has gone over their allocation on Instance %s - Enforcement Choice: %s" % (user, instance, action_name))
    # Dispatch table: every entry takes the same argument signature.
    action_map = {
        'Suspend': suspend_instance,
        'Stop': stop_instance,
        'Shelve': shelve_instance,
        'Shelve Offload': offload_instance,
    }
    try:
        if not action_name:
            logger.debug("No 'action_name' provided")
            return
        elif action_name == 'Terminate':
            destroy_instance(user, identity.uuid, instance.id)
        elif action_name in action_map:
            action_map[action_name](
                driver,
                instance,
                identity.provider.uuid,
                identity.uuid,
                user,
                reclaim_ip)
        else:
            raise Exception("Encountered Unknown Action Named %s" % action_name)
    except ObjectDoesNotExist:
        # This may be unreachable when null,blank = True
        logger.debug("Provider %s - 'Do Nothing' for Over Allocation" % identity.provider)
        return
Exemplo n.º 22
0
def monitor_instances_for(provider_id, users=None,
                          print_logs=False, check_allocations=False, start_date=None, end_date=None):
    """
    Run the set of tasks related to monitoring instances for a provider.
    Optionally, provide a list of usernames to monitor
    While debugging, print_logs=True can be very helpful.
    start_date and end_date allow you to search a 'non-standard' window of time.

    Returns the total number of running instances seen, or None for
    non-openstack providers.
    """
    provider = Provider.objects.get(id=provider_id)

    # For now, lets just ignore everything that isn't openstack.
    if 'openstack' not in provider.type.name.lower():
        return

    instance_map = _get_instance_owner_map(provider, users=users)

    if print_logs:
        # Mirror celery logging to stdout for interactive debugging.
        import logging
        import sys
        consolehandler = logging.StreamHandler(sys.stdout)
        consolehandler.setLevel(logging.DEBUG)
        celery_logger.addHandler(consolehandler)

    # DEVNOTE: Potential slowdown running multiple functions
    # Break this out when instance-caching is enabled
    running_total = 0
    for username in sorted(instance_map.keys()):
        running_instances = instance_map[username]
        running_total += len(running_instances)
        identity = _get_identity_from_tenant_name(provider, username)
        if identity and running_instances:
            try:
                driver = get_cached_driver(identity=identity)
                core_running_instances = [
                    convert_esh_instance(
                        driver,
                        inst,
                        identity.provider.uuid,
                        identity.uuid,
                        identity.created_by) for inst in running_instances]
            except Exception as exc:
                # Best-effort: skip this user and continue with the rest.
                celery_logger.exception(
                    "Could not convert running instances for %s" %
                    username)
                continue
        else:
            # No running instances.
            core_running_instances = []
        # Using the 'known' list of running instances, cleanup the DB
        # NOTE(review): 'identity' may be None here when no match was found
        # above — confirm _cleanup_missing_instances tolerates that.
        core_instances = _cleanup_missing_instances(
            identity,
            core_running_instances)
        if check_allocations:
            allocation_result = user_over_allocation_enforcement(
                provider, username,
                print_logs, start_date, end_date)
    if print_logs:
        # consolehandler exists whenever print_logs is True (set above).
        celery_logger.removeHandler(consolehandler)
    return running_total
Exemplo n.º 23
0
def monitor_instances_for(provider_id,
                          users=None,
                          print_logs=False,
                          check_allocations=False,
                          start_date=None,
                          end_date=None):
    """
    Run the set of tasks related to monitoring instances for a provider.
    Optionally, provide a list of usernames to monitor.
    While debugging, print_logs=True can be very helpful.
    start_date and end_date allow you to search a 'non-standard' window of time.

    Returns the total number of running instances seen, or None when the
    provider is not an OpenStack provider.
    """
    provider = Provider.objects.get(id=provider_id)

    # For now, lets just ignore everything that isn't openstack.
    if 'openstack' not in provider.type.name.lower():
        return

    instance_map = _get_instance_owner_map(provider, users=users)

    if print_logs:
        console_handler = _init_stdout_logging()

    # DEVNOTE: Potential slowdown running multiple functions
    # Break this out when instance-caching is enabled
    running_total = 0
    if not settings.ENFORCING:
        celery_logger.debug('Settings dictate allocations are NOT enforced')
    for username in sorted(instance_map.keys()):
        running_instances = instance_map[username]
        running_total += len(running_instances)
        identity = _get_identity_from_tenant_name(provider, username)
        if identity and running_instances:
            try:
                driver = get_cached_driver(identity=identity)
                core_running_instances = [
                    convert_esh_instance(driver, inst, identity.provider.uuid,
                                         identity.uuid, identity.created_by)
                    for inst in running_instances
                ]
            except Exception:
                # FIX: dropped the unused 'as exc' binding;
                # logger.exception records the traceback already.
                celery_logger.exception(
                    "Could not convert running instances for %s" % username)
                continue
        else:
            # No running instances.
            core_running_instances = []
        # Using the 'known' list of running instances, cleanup the DB.
        # FIX: removed unused 'core_instances'/'allocation_result' locals.
        _cleanup_missing_instances(identity, core_running_instances)
        if check_allocations:
            user_over_allocation_enforcement(
                provider, username, print_logs, start_date, end_date)
    if print_logs:
        _exit_stdout_logging(console_handler)
    return running_total
Exemplo n.º 24
0
def get_core_instances(identity_uuid):
    """
    List every (esh) instance the cloud reports for this identity and
    convert each one into its core (database) representation.
    """
    identity = CoreIdentity.objects.get(uuid=identity_uuid)
    driver = get_cached_driver(identity=identity)
    return [
        convert_esh_instance(
            driver, esh_instance,
            identity.provider.uuid, identity.uuid, identity.created_by)
        for esh_instance in driver.list_instances()
    ]
Exemplo n.º 25
0
def get_core_instances(identity_uuid):
    """
    Fetch all cloud instances for `identity_uuid` and return the list
    of matching core (database) instances.
    """
    identity = CoreIdentity.objects.get(uuid=identity_uuid)
    driver = get_cached_driver(identity=identity)
    core_instances = []
    for esh_instance in driver.list_instances():
        core_instances.append(
            convert_esh_instance(driver,
                                 esh_instance,
                                 identity.provider.uuid,
                                 identity.uuid,
                                 identity.created_by))
    return core_instances
Exemplo n.º 26
0
def monitor_instances_for(provider_id,
                          users=None,
                          print_logs=False,
                          start_date=None,
                          end_date=None):
    """
    Run the set of tasks related to monitoring instances for a provider.
    Optionally, provide a list of usernames to monitor.
    While debugging, print_logs=True can be very helpful.
    start_date and end_date allow you to search a 'non-standard' window of time.
    """
    provider = Provider.objects.get(id=provider_id)

    # For now, lets just ignore everything that isn't openstack.
    if 'openstack' not in provider.type.name.lower():
        return

    instance_map = _get_instance_owner_map(provider, users=users)

    if print_logs:
        # Mirror DEBUG output to stdout for the duration of this run.
        import logging
        import sys
        consolehandler = logging.StreamHandler(sys.stdout)
        consolehandler.setLevel(logging.DEBUG)
        logger.addHandler(consolehandler)

    # DEVNOTE: Potential slowdown running multiple functions
    # Break this out when instance-caching is enabled
    for username in sorted(instance_map.keys()):
        running_instances = instance_map[username]
        identity = _get_identity_from_tenant_name(provider, username)
        if identity and running_instances:
            try:
                driver = get_cached_driver(identity=identity)
                core_running_instances = [
                    convert_esh_instance(driver, inst, identity.provider.uuid,
                                         identity.uuid, identity.created_by)
                    for inst in running_instances
                ]
            except Exception:
                # FIX: dropped the unused 'as exc' binding;
                # logger.exception records the traceback already.
                logger.exception("Could not convert running instances for %s" %
                                 username)
                continue
        else:
            # No running instances.
            core_running_instances = []
        # Using the 'known' list of running instances, cleanup the DB.
        # FIX: removed unused 'core_instances'/'allocation_result' locals.
        _cleanup_missing_instances(identity, core_running_instances)
        user_over_allocation_enforcement(
            provider, username, print_logs, start_date, end_date)
    if print_logs:
        logger.removeHandler(consolehandler)
Exemplo n.º 27
0
def monitor_instances_for(provider_id, users=None,
                          print_logs=False, start_date=None, end_date=None):
    """
    Run the set of tasks related to monitoring instances for a provider.
    Optionally, provide a list of usernames to monitor.
    While debugging, print_logs=True can be very helpful.
    start_date and end_date allow you to search a 'non-standard' window of time.
    """
    provider = Provider.objects.get(id=provider_id)

    # For now, lets just ignore everything that isn't openstack.
    if 'openstack' not in provider.type.name.lower():
        return
    instance_map = _get_instance_owner_map(provider, users=users)

    if print_logs:
        console_handler = _init_stdout_logging()
    seen_instances = []
    # DEVNOTE: Potential slowdown running multiple functions
    # Break this out when instance-caching is enabled
    if not settings.ENFORCING:
        celery_logger.debug('Settings dictate allocations are NOT enforced')
    for tenant_name in sorted(instance_map.keys()):
        running_instances = instance_map[tenant_name]
        identity = _get_identity_from_tenant_name(provider, tenant_name)
        if identity and running_instances:
            try:
                driver = get_cached_driver(identity=identity)
                core_running_instances = [
                    convert_esh_instance(
                        driver,
                        inst,
                        identity.provider.uuid,
                        identity.uuid,
                        identity.created_by) for inst in running_instances]
                seen_instances.extend(core_running_instances)
            except Exception:
                # FIX: dropped the unused 'as exc' binding.
                celery_logger.exception(
                    "Could not convert running instances for %s" %
                    tenant_name)
                continue
        else:
            # No running instances.
            core_running_instances = []
        # Using the 'known' list of running instances, cleanup the DB.
        # FIX: removed the unused 'core_instances' local.
        _cleanup_missing_instances(identity, core_running_instances)
    if print_logs:
        _exit_stdout_logging(console_handler)
    # return seen_instances  NOTE: this has been commented out to avoid PicklingError!
    # TODO: Uncomment the above, Determine what _we can return_ and return that instead....
    return
Exemplo n.º 28
0
def provider_over_allocation_enforcement(identity, user):
    """
    Apply the provider's configured 'over_allocation_action' to every
    instance owned by `identity`.

    Returns False when the provider defines no action; True after the
    action has been executed for each instance (user was over allocation).
    """
    action = identity.provider.over_allocation_action
    if not action:
        logger.debug("No 'over_allocation_action' provided for %s"
                     % identity.provider)
        return False
    driver = get_cached_driver(identity=identity)
    # TODO: Parallelize this operation so you don't wait for larger
    # instances to finish the 'wait_for' task below.
    for esh_instance in driver.list_instances():
        execute_provider_action(user, driver, identity, esh_instance, action)
    return True  # User was over_allocation
Exemplo n.º 29
0
def monitor_instances_for(
    provider_id, users=None, print_logs=False, start_date=None, end_date=None
):
    """
    Run the set of tasks related to monitoring instances for a provider.
    Optionally, provide a list of usernames to monitor.
    While debugging, print_logs=True can be very helpful.
    start_date and end_date allow you to search a 'non-standard' window of time.
    """
    provider = Provider.objects.get(id=provider_id)

    # Anything that isn't an openstack provider is ignored for now.
    if 'openstack' not in provider.type.name.lower():
        return
    instance_map = _get_instance_owner_map(provider, users=users)

    if print_logs:
        console_handler = _init_stdout_logging()
    seen_instances = []
    # DEVNOTE: potential slowdown from running several functions per tenant;
    # break this out when instance-caching is enabled.
    if not settings.ENFORCING:
        celery_logger.debug('Settings dictate allocations are NOT enforced')
    for tenant_name, running_instances in sorted(instance_map.items()):
        identity = _get_identity_from_tenant_name(provider, tenant_name)
        core_running_instances = []
        if identity and running_instances:
            try:
                driver = get_cached_driver(identity=identity)
                for inst in running_instances:
                    core_running_instances.append(
                        convert_esh_instance(
                            driver, inst, identity.provider.uuid,
                            identity.uuid, identity.created_by))
                seen_instances.extend(core_running_instances)
            except Exception:
                celery_logger.exception(
                    "Could not convert running instances for %s" % tenant_name
                )
                continue
        # Reconcile the DB against the 'known' list of running instances.
        _cleanup_missing_instances(identity, core_running_instances)
    if print_logs:
        _exit_stdout_logging(console_handler)
    # return seen_instances  NOTE: this has been commented out to avoid PicklingError!
    # TODO: Uncomment the above, Determine what _we can return_ and return that instead....
    return
Exemplo n.º 30
0
def get_current_quota(identity_id):
    """
    Compute current resource usage for an identity by summing the sizes
    of its instances that are neither suspended nor shut off.

    Returns a dict: {'cpu', 'ram', 'disk', 'suspended_count'}.
    """
    driver = get_cached_driver(identity=Identity.objects.get(id=identity_id))
    cpu = ram = disk = suspended = 0
    for instance in driver.list_instances():
        # Suspended/shutoff instances do not count against cpu/ram/disk.
        # FIX: replaced a backslash-continued 'or' chain with an 'in' test.
        if instance.extra['status'] in ('suspended', 'shutoff'):
            suspended += 1
            continue
        size = driver.get_size(instance.size.id)
        cpu += size.cpu
        ram += size.ram
        disk += size.disk
    return {'cpu': cpu, 'ram': ram, 'disk': disk, 'suspended_count': suspended}
Exemplo n.º 31
0
def _set_volume_quota(user_quota, identity):
    """
    Push the volume-related (cinder) quota values for `identity` up to
    the OpenStack provider.
    """
    volume_values = {
        'volumes': user_quota.storage_count,
        'gigabytes': user_quota.storage,
        'snapshots': user_quota.snapshot_count,
    }
    logger.info("Updating quota for %s to %s"
                % (identity.created_by.username, volume_values))
    user_driver = get_cached_driver(identity=identity)
    # Cinder keys the quota update on the provider-side username.
    username = user_driver._connection._get_username()
    account_driver = get_account_driver(identity.provider)
    account_driver.admin_driver._connection._cinder_update_quota(
        username, volume_values)
    return
Exemplo n.º 32
0
def _set_volume_quota(user_quota, identity):
    """
    Apply `user_quota`'s storage limits (volumes, gigabytes, snapshots)
    on the cloud side via the admin cinder connection.
    """
    username = identity.created_by.username
    volume_values = dict(
        volumes=user_quota.storage_count,
        gigabytes=user_quota.storage,
        snapshots=user_quota.snapshot_count,
    )
    logger.info("Updating quota for %s to %s" % (username, volume_values))
    driver = get_cached_driver(identity=identity)
    # Overwrite with the provider-side username -- cinder expects it.
    username = driver._connection._get_username()
    ad = get_account_driver(identity.provider)
    admin_driver = ad.admin_driver
    admin_driver._connection._cinder_update_quota(username, volume_values)
    return
Exemplo n.º 33
0
def get_boot_source(username, identity_uuid, source_identifier):
    """
    Look up the InstanceSource (boot source) for the given identity.

    Raises Exception when the identity or the boot source cannot be
    found. (`username` is accepted for API symmetry; the lookup is
    keyed on identity + identifier only.)
    """
    try:
        identity = CoreIdentity.objects.get(uuid=identity_uuid)
        # Warm the driver cache for this identity before the lookup.
        get_cached_driver(identity=identity)
        return InstanceSource.current_sources().get(
            provider=identity.provider,
            identifier=source_identifier)
    except CoreIdentity.DoesNotExist:
        raise Exception("Identity %s does not exist" % identity_uuid)
    except InstanceSource.DoesNotExist:
        raise Exception("No boot source found with identifier %s"
                        % source_identifier)
Exemplo n.º 34
0
def get_current_quota(identity_id):
    """
    Sum cpu/ram/disk usage for all active instances of an identity;
    suspended and shutoff instances are only counted, not summed.

    Returns a dict: {'cpu', 'ram', 'disk', 'suspended_count'}.
    """
    driver = get_cached_driver(
        identity=Identity.objects.get(id=identity_id))
    cpu = ram = disk = suspended = 0
    for instance in driver.list_instances():
        # FIX: replaced a backslash-continued 'or' chain with an 'in' test.
        if instance.extra['status'] in ('suspended', 'shutoff'):
            suspended += 1
            continue
        size = driver.get_size(instance.size.id)
        cpu += size.cpu
        ram += size.ram
        disk += size.disk
    return {'cpu': cpu, 'ram': ram, 'disk': disk, 'suspended_count': suspended}
Exemplo n.º 35
0
def get_boot_source(username, identity_uuid, source_identifier):
    """
    Resolve the boot source used to launch an instance for an identity.

    Raises Exception if either the identity or the source is missing.
    Note: `username` is part of the call signature but unused in the
    lookup itself.
    """
    try:
        identity = CoreIdentity.objects.get(uuid=identity_uuid)
        driver = get_cached_driver(identity=identity)
        sources = InstanceSource.current_sources()
        boot_source = sources.get(
            provider=identity.provider,
            identifier=source_identifier)
    except CoreIdentity.DoesNotExist:
        raise Exception("Identity %s does not exist" % identity_uuid)
    except InstanceSource.DoesNotExist:
        raise Exception("No boot source found with identifier %s"
                        % source_identifier)
    return boot_source
Exemplo n.º 36
0
def check_over_instance_quota(
    username,
    identity_uuid,
    esh_size=None,
    include_networking=False,
    raise_exc=True
):
    """
    Checks quota based on current limits (and an instance of size, if passed).
    param - esh_size - if included, update the CPU and Memory totals & increase instance_count
    param - launch_networking - if True, increase floating_ip_count
    param - raise_exc - if True, raise ValidationError, otherwise return False

    return True if passing
    return False if ValidationError occurs and raise_exc=False
    By default, allow ValidationError to raise.

    return or raise exc
    """
    memberships_available = IdentityMembership.objects.filter(
        identity__uuid=identity_uuid,
        member__memberships__user__username=username
    )
    if not memberships_available:
        # FIX: previously fell through to an unbound 'membership' local
        # (NameError) -- fail with a meaningful error instead.
        raise IdentityMembership.DoesNotExist(
            "No membership found for user %s and identity %s"
            % (username, identity_uuid))
    membership = memberships_available.first()
    identity = membership.identity
    quota = identity.quota
    driver = get_cached_driver(identity=identity)
    new_port = new_floating_ip = new_instance = new_cpu = new_ram = 0
    if esh_size:
        new_cpu += esh_size.cpu
        new_ram += esh_size.ram
        new_instance += 1
        new_port += 1
    if include_networking:
        new_floating_ip += 1
    # Will throw ValidationError if false.
    try:
        has_cpu_quota(driver, quota, new_cpu)
        has_mem_quota(driver, quota, new_ram)
        has_instance_count_quota(driver, quota, new_instance)
        has_floating_ip_count_quota(driver, quota, new_floating_ip)
        has_port_count_quota(identity, driver, quota, new_port)
        return True
    except ValidationError:
        if raise_exc:
            raise
        return False
Exemplo n.º 37
0
def current_instance_time(user, instances, identity_id, delta_time):
    """
    Convert all running (esh) instances to core instances so the database
    is up to date, then delegate to 'core_instance_time'.

    Returns the time used as reported by core_instance_time.
    """
    # FIX: removed the unused local import of api.get_esh_driver.
    ident = Identity.objects.get(id=identity_id)
    driver = get_cached_driver(identity=ident)
    core_instance_list = [
        convert_esh_instance(driver, inst,
                             ident.provider.id, ident.id, user)
        for inst in instances]
    # All instances that don't have an end-date should be
    # included, even if all of their time is not.
    time_used = core_instance_time(
        user, ident.id, delta_time, running=core_instance_list)
    return time_used
Exemplo n.º 38
0
def get_instance_owner_map(provider, users=None):
    """
    Build a mapping with an entry for every identity on `provider`
    (all keys == all identities); each value holds that owner's
    running instances.
    """
    admin_driver = get_cached_driver(provider=provider)
    all_identities = _select_identities(provider, users)
    all_instances = get_cached_instances(provider=provider)
    all_tenants = admin_driver._connection._keystone_list_tenants()
    # Translate instance.owner from tenant-id to tenant-name in one pass.
    all_instances = _convert_tenant_id_to_names(all_instances, all_tenants)
    # Group the instances by the owner running them.
    owner_map = _make_instance_owner_map(all_instances, users=users)
    logger.info("Instance owner map created")
    # Guarantee an entry even for identities with no instances.
    identity_map = _include_all_idents(all_identities, owner_map)
    logger.info("Identity map created")
    return identity_map
Exemplo n.º 39
0
def get_instance_owner_map(provider, users=None):
    """
    Return {identity/username: [instances]} for every identity on the
    provider, including identities that currently run nothing.
    """
    admin_driver = get_cached_driver(provider=provider)
    identities = _select_identities(provider, users)
    instances = get_cached_instances(provider=provider)
    tenants = admin_driver._connection._keystone_list_tenants()
    # instance.owner arrives as a tenant-id; swap in tenant-names up front.
    named_instances = _convert_tenant_id_to_names(instances, tenants)
    # Owner -> instance list.
    instance_map = _make_instance_owner_map(named_instances, users=users)
    logger.info("Instance owner map created")
    full_map = _include_all_idents(identities, instance_map)
    logger.info("Identity map created")
    return full_map
Exemplo n.º 40
0
def launch_instance(user, provider_uuid, identity_uuid,
                    size_alias, source_alias, **kwargs):
    """
    Initialization point --> launch_*_instance --> ..
    Required arguments will launch the instance, extras will do
    provider-specific modifications.

    1. Test for available Size (on specific driver!)
    2. Test user has Quota/Allocation (on our DB)
    3. Test user is launching appropriate size (Not below Thresholds)
    4. Perform an 'Instance launch' depending on Boot Source
    5. Return CORE Instance with new 'esh' objects attached.
    """
    now_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    status_logger.debug("%s,%s,%s,%s,%s,%s"
                 % (now_time, user, "No Instance", source_alias, size_alias,
                    "Request Received"))
    identity = CoreIdentity.objects.get(uuid=identity_uuid)
    esh_driver = get_cached_driver(identity=identity)

    # May raise SizeNotAvailable
    size = check_size(esh_driver, size_alias, provider_uuid)
    # May raise OverQuotaError or OverAllocationError
    check_quota(user.username, identity_uuid, size)
    # May raise UnderThresholdError
    check_application_threshold(
        user.username, identity_uuid, size, source_alias)
    # May raise Exception("Volume/Machine not available")
    boot_source = get_boot_source(user.username, identity_uuid, source_alias)

    if boot_source.is_volume():
        # NOTE: this route works when launching an EXISTING volume ONLY.
        # To CREATE a new bootable volume (from an existing
        # volume/image/snapshot) use service/volume.py 'boot_volume'.
        source = _retrieve_source(esh_driver, boot_source.identifier, "volume")
        launch = launch_volume_instance
    elif boot_source.is_machine():
        source = _retrieve_source(esh_driver, boot_source.identifier, "machine")
        launch = launch_machine_instance
    else:
        raise Exception("Boot source is of an unknown type")
    return launch(esh_driver, identity, source, size, **kwargs)
Exemplo n.º 41
0
def _execute_provider_action(identity, user, instance, action_name):
    """
    Execute the named over-allocation enforcement action on `instance`.

    Raises Exception for an unknown action name; an ObjectDoesNotExist
    from the action is treated as 'Do Nothing'.
    """
    driver = get_cached_driver(identity=identity)

    # NOTE: This if statement is a HACK! It will be removed when IP management is enabled in an upcoming version. -SG
    reclaim_ip = identity.provider.location != 'iPlant Cloud - Tucson'
    # ENDNOTE

    # NOTE: This metadata statement is a HACK! It should be removed when all instances matching this metadata key have been removed.
    instance_has_home_mount = instance.extra['metadata'].get(
        'atmosphere_ephemeral_home_mount', 'false').lower()
    if instance_has_home_mount == 'true' and action_name == 'Shelve':
        logger.info(
            "Instance %s will be suspended instead of shelved, because the ephemeral storage is in /home"
            % instance.id)
        action_name = 'Suspend'

    logger.info(
        "User %s has gone over their allocation on Instance %s - Enforcement Choice: %s"
        % (user, instance, action_name))
    # These four actions share the same call signature.
    ip_reclaiming_actions = {
        'Suspend': suspend_instance,
        'Stop': stop_instance,
        'Shelve': shelve_instance,
        'Shelve Offload': offload_instance,
    }
    try:
        if not action_name:
            logger.debug("No 'action_name' provided")
            return
        elif action_name in ip_reclaiming_actions:
            ip_reclaiming_actions[action_name](
                driver, instance, identity.provider.uuid,
                identity.uuid, user, reclaim_ip)
        elif action_name == 'Terminate':
            destroy_instance(user, identity.uuid, instance.id)
        else:
            raise Exception("Encountered Unknown Action Named %s" %
                            action_name)
    except ObjectDoesNotExist:
        # This may be unreachable when null,blank = True
        logger.debug("Provider %s - 'Do Nothing' for Over Allocation" %
                     identity.provider)
        return
Exemplo n.º 42
0
def _set_network_quota(user_quota, identity):
    """
    Push the network (neutron) quota values for `identity`'s tenant up
    to the OpenStack provider.
    """
    network_values = {
        'port': user_quota.port_count,
        'floatingip': user_quota.floating_ip_count,
        # INTENTIONALLY SKIPPED/IGNORED
        # 'subnet', 'router', 'network',
        # 'security_group', 'security_group_rules'
    }
    logger.info("Updating network quota for %s to %s" %
                (identity.created_by.username, network_values))
    user_driver = get_cached_driver(identity=identity)
    tenant_id = user_driver._connection._get_tenant_id()

    account_driver = get_account_driver(identity.provider)
    account_driver.admin_driver._connection._neutron_update_quota(
        tenant_id, network_values)
    return
Exemplo n.º 43
0
def _set_compute_quota(user_quota, identity):
    """
    Push the compute (nova) quota values for `identity` up to the
    OpenStack provider, scoped to the user within the tenant.
    """
    # Use THESE values...
    compute_values = {
        'cores': user_quota.cpu,
        'ram': user_quota.memory,  # NOTE: Test that this works on havana
        'floating_ips': user_quota.floating_ip_count,
        'fixed_ips': user_quota.port_count,
        'instances': user_quota.instance_count,
    }
    logger.info("Updating quota for %s to %s"
                % (identity.created_by.username, compute_values))
    user_driver = get_cached_driver(identity=identity)
    user_id = user_driver._connection.key
    tenant_id = user_driver._connection._get_tenant_id()
    account_driver = get_account_driver(identity.provider)
    return account_driver.admin_driver._connection.ex_update_quota_for_user(
        tenant_id, user_id, compute_values)
Exemplo n.º 44
0
def _set_compute_quota(user_quota, identity):
    """
    Apply `user_quota`'s compute limits (cores, ram, ips, instances) on
    the cloud side via the admin nova connection.
    """
    username = identity.created_by.username
    # Use THESE values...
    compute_values = dict(
        cores=user_quota.cpu,
        ram=user_quota.memory,  # NOTE: Test that this works on havana
        floating_ips=user_quota.floating_ip_count,
        fixed_ips=user_quota.port_count,
        instances=user_quota.instance_count,
    )
    logger.info("Updating quota for %s to %s" % (username, compute_values))
    driver = get_cached_driver(identity=identity)
    user_id = driver._connection.key
    tenant_id = driver._connection._get_tenant_id()
    ad = get_account_driver(identity.provider)
    admin_driver = ad.admin_driver
    return admin_driver._connection.ex_update_quota_for_user(
        tenant_id, user_id, compute_values)
Exemplo n.º 45
0
def _set_network_quota(user_quota, identity):
    """
    Apply `user_quota`'s network limits (ports, floating IPs) on the
    cloud side via the admin neutron connection.
    """
    username = identity.created_by.username
    network_values = dict(
        port=user_quota.port_count,
        floatingip=user_quota.floating_ip_count,
        # INTENTIONALLY SKIPPED/IGNORED
        # 'subnet', 'router', 'network',
        # 'security_group', 'security_group_rules'
    )
    logger.info("Updating network quota for %s to %s"
                % (username, network_values))
    driver = get_cached_driver(identity=identity)
    tenant_id = driver._connection._get_tenant_id()

    ad = get_account_driver(identity.provider)
    admin_driver = ad.admin_driver
    admin_driver._connection._neutron_update_quota(tenant_id, network_values)
    return
Exemplo n.º 46
0
def destroy_instance(identity_uuid, instance_alias):
    """
    Destroy the (esh) instance known by `instance_alias`.

    Returns None when the instance no longer exists; otherwise the
    result of the driver's destroy_node call.
    """
    identity = CoreIdentity.objects.get(uuid=identity_uuid)
    esh_driver = get_cached_driver(identity=identity)
    instance = esh_driver.get_instance(instance_alias)
    # Bail if instance doesnt exist
    if not instance:
        return None
    _check_volume_attachment(esh_driver, instance)
    if isinstance(esh_driver, OSDriver):
        # Openstack: Remove floating IP first
        try:
            esh_driver._connection.ex_disassociate_floating_ip(instance)
        except Exception as exc:
            # FIX: 'exc.message' is Python-2 only (AttributeError on Py3);
            # use str(exc) so "IP already gone" cases are still tolerated.
            message = str(exc)
            if not ("floating ip not found" in message
                    or "422 Unprocessable Entity Floating ip" in message):
                raise
    node_destroyed = esh_driver._connection.destroy_node(instance)
    return node_destroyed
Exemplo n.º 47
0
def _execute_provider_action(identity, user, instance, action_name):
    """
    Execute the named over-allocation enforcement action on `instance`.

    Raises Exception for an unrecognized action name; an
    ObjectDoesNotExist from the action is treated as 'Do Nothing'.
    """
    driver = get_cached_driver(identity=identity)
    try:
        if not action_name:
            logger.debug("No 'action_name' provided")
            return
        elif action_name == 'Suspend':
            suspend_instance(
                driver,
                instance,
                identity.provider.uuid,
                identity.uuid,
                user)
        elif action_name == 'Stop':
            stop_instance(
                driver,
                instance,
                identity.provider.uuid,
                identity.uuid,
                user)
        elif action_name == 'Shelve':
            shelve_instance(
                driver,
                instance,
                identity.provider.uuid,
                identity.uuid,
                user)
        elif action_name == 'Shelve Offload':
            offload_instance(
                driver,
                instance,
                identity.provider.uuid,
                identity.uuid,
                user)
        elif action_name == 'Terminate':
            destroy_instance(user, identity.uuid, instance)
        else:
            # FIX: referenced the undefined name 'action' (NameError).
            raise Exception("Encountered Unknown Action Named %s" %
                            action_name)
    except ObjectDoesNotExist:
        # This may be unreachable when null,blank = True
        # FIX: referenced the undefined name 'provider' (NameError).
        logger.debug(
            "Provider %s - 'Do Nothing' for Over Allocation" %
            identity.provider)
        return
Exemplo n.º 48
0
def check_over_storage_quota(
    username,
    identity_uuid,
    new_snapshot_size=0,
    new_volume_size=0,
    raise_exc=True
):
    """
    Checks quota based on current limits.
    param - new_snapshot_size - if included and non-zero, increase snapshot_count
    param - new_volume_size - if included and non-zero, add to storage total & increase storage_count
    param - raise_exc - if True, raise ValidationError, otherwise return False

    return True if passing
    return False if ValidationError occurs and raise_exc=False
    By default, allow ValidationError to raise.
    """
    memberships_available = IdentityMembership.objects.filter(
        identity__uuid=identity_uuid,
        member__memberships__user__username=username
    )
    if not memberships_available:
        # FIX: previously fell through to an unbound 'membership' local
        # (NameError) -- fail with a meaningful error instead.
        raise IdentityMembership.DoesNotExist(
            "No membership found for user %s and identity %s"
            % (username, identity_uuid))
    membership = memberships_available.first()
    identity = membership.identity
    quota = identity.quota
    driver = get_cached_driver(identity=identity)

    # FIXME: I don't believe that 'snapshot' size and 'volume' size share
    # the same quota, so for now we ignore 'snapshot-size',
    # and only care that value is 0 or >1
    new_snapshot = 1 if new_snapshot_size > 0 else 0

    new_disk = new_volume_size
    new_volume = 1 if new_volume_size > 0 else 0
    # Will throw ValidationError if false.
    try:
        has_storage_quota(driver, quota, new_disk)
        has_storage_count_quota(driver, quota, new_volume)
        has_snapshot_count_quota(driver, quota, new_snapshot)
        return True
    except ValidationError:
        if raise_exc:
            raise
        return False
Exemplo n.º 49
0
def _get_instance_owner_map(provider, users=None):
    """
    All keys == All identities
    Values = List of identities / username
    NOTE: This is KEYSTONE && NOVA specific. the 'instance owner' here is the
          username // ex_tenant_name
    """
    admin_driver = get_cached_driver(provider=provider)
    identities = _select_identities(provider, users)
    instances = get_cached_instances(provider=provider)
    tenants = admin_driver._connection._keystone_list_tenants()
    # Translate instance.owner from tenant-id to tenant-name in one pass.
    named_instances = _convert_tenant_id_to_names(instances, tenants)
    # Group instances by the owner that runs them.
    owner_map = _make_instance_owner_map(named_instances, users=users)
    logger.info("Instance owner map created")
    # Ensure every identity appears, even those with no instances.
    full_map = _include_all_idents(identities, owner_map)
    logger.info("Identity map created")
    return full_map
Exemplo n.º 50
0
def check_over_instance_quota(
        username, identity_uuid, esh_size=None,
        include_networking=False, raise_exc=True):
    """
    Checks quota based on current limits (and an instance of size, if passed).
    param - esh_size - if included, update the CPU and Memory totals & increase instance_count
    param - include_networking - if True, increase floating_ip_count
    param - raise_exc - if True, raise ValidationError, otherwise return False

    return True if passing
    return False if ValidationError occurs and raise_exc=False
    By default, allow ValidationError to raise.

    Raises IdentityMembership.DoesNotExist when no membership matches
    the given username/identity pair.
    """
    # BUG FIX: previously 'membership' was only bound inside an
    # `if memberships_available:` branch; an empty queryset then raised a
    # confusing NameError on the next line. Use .first() and fail loudly.
    membership = IdentityMembership.objects.filter(
        identity__uuid=identity_uuid,
        member__memberships__user__username=username).first()
    if membership is None:
        raise IdentityMembership.DoesNotExist(
            "No IdentityMembership found for username=%s identity=%s"
            % (username, identity_uuid))
    identity = membership.identity
    quota = identity.quota
    driver = get_cached_driver(identity=identity)
    new_port = new_floating_ip = new_instance = new_cpu = new_ram = 0
    if esh_size:
        # Launching an instance consumes CPU, RAM, an instance slot and a port.
        new_cpu += esh_size.cpu
        new_ram += esh_size.ram
        new_instance += 1
        new_port += 1
    if include_networking:
        new_floating_ip += 1
    # Each has_* call throws ValidationError if the quota would be exceeded.
    try:
        has_cpu_quota(driver, quota, new_cpu)
        has_mem_quota(driver, quota, new_ram)
        has_instance_count_quota(driver, quota, new_instance)
        has_floating_ip_count_quota(driver, quota, new_floating_ip)
        has_port_count_quota(identity, driver, quota, new_port)
        return True
    except ValidationError:
        if raise_exc:
            raise
        return False
Exemplo n.º 51
0
def current_instance_time(user, instances, identity_id, delta_time):
    """
    Convert all running (esh) instances to core instances, so the database
    is up to date before calling 'core_instance_time'.

    Returns the time_used value computed by core_instance_time.
    """
    # NOTE: removed an unused local import (`from api import get_esh_driver`)
    # that was never referenced in this function.
    ident = Identity.objects.get(id=identity_id)
    driver = get_cached_driver(identity=ident)
    core_instance_list = [
        convert_esh_instance(driver, inst, ident.provider.id, ident.id, user)
        for inst in instances
    ]
    # All instances that don't have an end-date should be
    # included, even if all of their time is not.
    time_used = core_instance_time(user,
                                   ident.id,
                                   delta_time,
                                   running=core_instance_list)
    return time_used
Exemplo n.º 52
0
def _get_instance_owner_map(provider, users=None):
    """
    Map every identity on `provider` to the instances it owns.

    All keys == All identities
    Values = List of identities / username
    NOTE: This is KEYSTONE && NOVA specific. the 'instance owner' here is the
          username // ex_tenant_name
    """
    admin_driver = get_cached_driver(provider=provider)
    selected_identities = _select_identities(provider, users)
    cached_instances = get_cached_instances(provider=provider)
    tenant_list = admin_driver._connection._keystone_list_tenants()
    # Bulk-convert instance.owner from tenant-id to tenant-name.
    named_instances = _convert_tenant_id_to_names(
        cached_instances, tenant_list)
    instance_map = _make_instance_owner_map(named_instances, users=users)
    logger.info("Instance owner map created")
    # Identities without instances are added with empty values.
    identity_map = _include_all_idents(selected_identities, instance_map)
    logger.info("Identity map created")
    return identity_map
Exemplo n.º 53
0
def _set_compute_quota(user_quota, identity):
    """
    Push the compute portion of `user_quota` to the OpenStack cloud
    behind `identity`.

    Legacy (v2 password auth) clouds use the per-user quota API and cannot
    set 'instances'; newer clouds use the per-tenant quota API.

    Returns the result of the underlying ex_update_quota* call.
    Raises whatever the driver raises (after logging) if the update fails.
    """
    # Use THESE values...
    compute_values = {
        'cores': user_quota.cpu,
        'ram': user_quota.memory*1024,  # NOTE: Value is stored in GB, Openstack (Liberty) expects MB
        'floating_ips': user_quota.floating_ip_count,
        'fixed_ips': user_quota.port_count,
        'instances': user_quota.instance_count,
        'force': True
    }
    # FIX: credentials were previously fetched twice and the branch condition
    # re-derived; fetch once and compute the legacy flag a single time.
    creds = identity.get_all_credentials()
    is_legacy_cloud = (
        creds.get('ex_force_auth_version', '2.0_password') == "2.0_password")
    use_tenant_id = False
    if is_legacy_cloud:
        # Legacy clouds reject the 'instances' key.
        compute_values.pop('instances')
        use_tenant_id = True

    logger.info("Updating quota for %s to %s"
                % (identity.created_by.username, compute_values))
    driver = get_cached_driver(identity=identity)
    username = driver._connection.key
    tenant_id = driver._connection._get_tenant_id()
    ad = get_account_driver(identity.provider, raise_exception=True)
    ks_user = ad.get_user(username)
    admin_driver = ad.admin_driver
    if not is_legacy_cloud:
        # FIXME: Remove 'use_tenant_id' when legacy clouds are no-longer in use.
        try:
            result = admin_driver._connection.ex_update_quota(
                tenant_id, compute_values, use_tenant_id=use_tenant_id)
        except Exception:
            logger.exception("Could not set a user-quota, trying to set tenant-quota")
            raise
    else:
        # For CyVerse old clouds, use the per-user quota API.
        try:
            result = admin_driver._connection.ex_update_quota_for_user(
                tenant_id, ks_user.id, compute_values, use_tenant_id=use_tenant_id)
        except Exception:
            logger.exception("Could not set a user-quota, trying to set tenant-quota")
            raise
        logger.info("Updated quota for %s to %s" % (username, result))
    return result
Exemplo n.º 54
0
def destroy_instance(identity_uuid, instance_alias):
    """
    Destroy the instance `instance_alias` on the cloud behind
    `identity_uuid`.

    For OpenStack, the floating IP is disassociated first; 'safe' errors
    (no floating IP attached, or the known 422/500 responses) are ignored.

    Returns the driver's destroy_node result, or None when the instance
    no longer exists.
    """
    identity = CoreIdentity.objects.get(uuid=identity_uuid)
    esh_driver = get_cached_driver(identity=identity)
    instance = esh_driver.get_instance(instance_alias)
    # Bail if instance doesnt exist
    if not instance:
        return None
    if isinstance(esh_driver, OSDriver):
        try:
            # Openstack: Remove floating IP first
            esh_driver._connection.ex_disassociate_floating_ip(instance)
        except Exception as exc:
            # BUG FIX: `exc.message` was removed in Python 3 and would raise
            # AttributeError here, masking the original error. `str(exc)`
            # is the portable equivalent.
            # Ignore 'safe' errors related to
            # no floating IP
            # or no Volume capabilities.
            exc_text = str(exc)
            if not ("floating ip not found" in exc_text
                    or "422 Unprocessable Entity Floating ip" in exc_text
                    or "500 Internal Server Error" in exc_text):
                raise
    node_destroyed = esh_driver._connection.destroy_node(instance)
    return node_destroyed
Exemplo n.º 55
0
def check_over_storage_quota(
        username, identity_uuid,
        new_snapshot_size=0, new_volume_size=0, raise_exc=True):
    """
    Checks quota based on current limits.
    param - new_snapshot_size - if included and non-zero, increase snapshot_count
    param - new_volume_size - if included and non-zero, add to storage total & increase storage_count
    param - raise_exc - if True, raise ValidationError, otherwise return False

    return True if passing
    return False if ValidationError occurs and raise_exc=False
    By default, allow ValidationError to raise.

    Raises IdentityMembership.DoesNotExist when no membership matches
    the given username/identity pair.
    """
    # BUG FIX: previously 'membership' was only bound inside an
    # `if memberships_available:` branch; an empty queryset then raised a
    # confusing NameError on the next line. Use .first() and fail loudly.
    membership = IdentityMembership.objects.filter(
        identity__uuid=identity_uuid,
        member__memberships__user__username=username).first()
    if membership is None:
        raise IdentityMembership.DoesNotExist(
            "No IdentityMembership found for username=%s identity=%s"
            % (username, identity_uuid))
    identity = membership.identity
    quota = identity.quota
    driver = get_cached_driver(identity=identity)

    # FIXME: I don't believe that 'snapshot' size and 'volume' size share
    # the same quota, so for now we ignore 'snapshot-size',
    # and only care that value is 0 or >1
    new_snapshot = 1 if new_snapshot_size > 0 else 0

    new_disk = new_volume_size
    new_volume = 1 if new_volume_size > 0 else 0
    # Each has_* call throws ValidationError if the quota would be exceeded.
    try:
        has_storage_quota(driver, quota, new_disk)
        has_storage_count_quota(driver, quota, new_volume)
        has_snapshot_count_quota(driver, quota, new_snapshot)
        return True
    except ValidationError:
        if raise_exc:
            raise
        return False
Exemplo n.º 56
0
def delete_volume_snapshot(identity_uuid, snapshot_id):
    """
    Delete an existing volume snapshot.

    Retries the task on SoftTimeLimitExceeded; re-raises
    Identity.DoesNotExist after logging.
    """
    try:
        core_identity = Identity.objects.get(uuid=identity_uuid)
        esh_driver = get_cached_driver(identity=core_identity)
        target = esh_driver._connection.ex_get_snapshot(snapshot_id)
        if not target:
            raise Exception("No snapshot found for id=%s." % snapshot_id)
        # The driver returns a falsy value when the delete call fails.
        deleted = esh_driver._connection.ex_delete_snapshot(target)
        if not deleted:
            raise Exception("Unable to delete snapshot with id=%s" %
                            snapshot_id)
    except SoftTimeLimitExceeded as e:
        delete_volume_snapshot.retry(exc=e)
    except Identity.DoesNotExist:
        logger.info("An Identity for uuid=%s does not exist.", identity_uuid)
        raise
Exemplo n.º 57
0
def delete_volume_snapshot(identity_uuid, snapshot_id):
    """
    Delete an existing volume snapshot.

    Looks up the Identity by uuid, resolves a cached driver for it, and
    deletes the snapshot through the driver connection.

    Retries the task on SoftTimeLimitExceeded; re-raises
    Identity.DoesNotExist after logging.
    """
    try:
        identity = Identity.objects.get(uuid=identity_uuid)
        driver = get_cached_driver(identity=identity)
        snapshot = driver._connection.ex_get_snapshot(snapshot_id)

        # The driver returns a falsy value when the snapshot is not found.
        if not snapshot:
            raise Exception("No snapshot found for id=%s." % snapshot_id)

        success = driver._connection.ex_delete_snapshot(snapshot)

        if not success:
            raise Exception("Unable to delete snapshot with id=%s" %
                            snapshot_id)
    except SoftTimeLimitExceeded as e:
        # Celery soft time limit hit -- requeue the task.
        delete_volume_snapshot.retry(exc=e)
    except Identity.DoesNotExist:
        logger.info("An Identity for uuid=%s does not exist.", identity_uuid)
        raise
Exemplo n.º 58
0
def _os_update_owner(provider_machine, tenant_name):
    """
    Update the 'owner'/'application_owner' metadata of an image to
    point at `tenant_name` on an active OpenStack provider.
    """
    from core.models import Provider
    from service.driver import get_admin_driver
    from service.cache import get_cached_machines, get_cached_driver
    provider = provider_machine.provider
    if provider not in Provider.get_active(type_name='openstack'):
        raise Exception("An active openstack provider is required to"
                        " update image owner")
    esh_driver = get_cached_driver(provider)
    if not esh_driver:
        raise Exception("The account driver of Provider %s is required to"
                        " update image metadata" % provider)
    # Force a refresh so we see the latest machine list.
    machines = get_cached_machines(provider, force=True)
    candidates = (mach for mach in machines
                  if mach.alias == provider_machine.identifier)
    esh_machine = next(candidates, None)
    if not esh_machine:
        raise Exception("Machine with ID  %s not found"
                        % provider_machine.identifier)
    tenant_id = _tenant_name_to_id(provider_machine.provider, tenant_name)
    update_machine_metadata(esh_driver, esh_machine,
                            {"owner": tenant_id,
                             "application_owner": tenant_name})
Exemplo n.º 59
0
def set_provider_quota(identity_id):
    """
    Push the membership quota for `identity_id` up to its OpenStack
    provider. No-op (returns None) when the identity has no credentials;
    returns True otherwise.
    """
    identity = Identity.objects.get(id=identity_id)
    if not identity.credential_set.all():
        # Can't update quota if credentials aren't set.
        return
    provider_type = identity.provider.get_type_name().lower()
    if provider_type != 'openstack':
        # Only OpenStack providers support quota updates here.
        return True
    driver = get_cached_driver(identity=identity)
    username = identity.created_by.username
    user_id = driver._connection._get_user_id()
    tenant_id = driver._connection._get_tenant_id()
    membership = IdentityMembership.objects.get(identity__id=identity_id,
                                                member__name=username)
    user_quota = membership.quota
    if user_quota:
        # NOTE: quota memory is stored in GB; OpenStack expects MB.
        values = {'cores': user_quota.cpu, 'ram': user_quota.memory * 1024}
        logger.info("Updating quota for %s to %s" % (username, values))
        ad = AccountDriver(identity.provider)
        admin_driver = ad.admin_driver
        admin_driver._connection.ex_update_quota_for_user(
            tenant_id, user_id, values)
    return True
Exemplo n.º 60
0
def create_volume_from_snapshot(identity_uuid, snapshot_id, size_id, name,
                                description, metadata):
    """
    Create a new volume for the snapshot

    NOTE: The size must be at least the same size as the original volume.

    Retries the task on SoftTimeLimitExceeded; re-raises
    Identity.DoesNotExist after logging.
    """
    try:
        identity = Identity.objects.get(uuid=identity_uuid)
        driver = get_cached_driver(identity=identity)
        snapshot = driver._connection.ex_get_snapshot(snapshot_id)
        size = driver._connection.ex_get_size(size_id)

        if not snapshot:
            raise Exception("No snapshot found for id=%s." % snapshot_id)

        if not size:
            raise Exception("No size found for id=%s." % size_id)

        # NOTE(review): `size` is validated above but never passed to
        # create_volume -- the new volume uses snapshot.size instead.
        # Confirm whether the size_id argument was meant to override it.
        success, esh_volume = driver._connection.create_volume(
            snapshot.size,
            name,
            description=description,
            metadata=metadata,
            snapshot=snapshot)

        if not success:
            raise Exception("Could not create volume from snapshot")

        # Save the new volume to the database
        convert_esh_volume(esh_volume, identity.provider.uuid, identity_uuid,
                           identity.created_by)
    except SoftTimeLimitExceeded as e:
        # Celery soft time limit hit -- requeue the task.
        create_volume_from_snapshot.retry(exc=e)
    except Identity.DoesNotExist:
        logger.info("An Identity for uuid=%s does not exist.", identity_uuid)
        raise