Code example #1
async def check_domain_resource_limit(
    db_conn: SAConnection,
    sched_ctx: SchedulingContext,
    sess_ctx: PendingSession,
) -> PredicateResult:
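    '''
    Check that admitting the requested slots does not exceed
    the domain's total resource quota.
    '''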
    query = (sa.select([domains.c.total_resource_slots
                        ]).where(domains.c.name == sess_ctx.domain_name))
    domain_resource_slots = await db_conn.scalar(query)
    domain_resource_policy = {
        'total_resource_slots': domain_resource_slots,
        'default_for_unspecified': DefaultForUnspecified.UNLIMITED
    }
    total_domain_allowed = ResourceSlot.from_policy(domain_resource_policy,
                                                    sched_ctx.known_slot_types)
    domain_occupied = await sched_ctx.registry.get_domain_occupancy(
        sess_ctx.domain_name, conn=db_conn)
    log.debug('domain:{} current-occupancy: {}', sess_ctx.domain_name,
              domain_occupied)
    log.debug('domain:{} total-allowed: {}', sess_ctx.domain_name,
              total_domain_allowed)
    if not (domain_occupied + sess_ctx.requested_slots <=
            total_domain_allowed):
        return PredicateResult(
            False,
            'Your domain resource quota is exceeded. ({})'.format(' '.join(
                f'{k}={v}' for k, v in total_domain_allowed.to_humanized(
                    sched_ctx.known_slot_types).items())),
        )
    return PredicateResult(True)
Code example #2
async def check_group_resource_limit(
    db_conn: SAConnection,
    sched_ctx: SchedulingContext,
    sess_ctx: PendingSession,
) -> PredicateResult:
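    '''
    Check that admitting the requested slots does not exceed
    the group's total resource quota.
    '''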
    query = (sa.select([groups.c.total_resource_slots
                        ]).where(groups.c.id == sess_ctx.group_id))
    group_resource_slots = await db_conn.scalar(query)
    group_resource_policy = {
        'total_resource_slots': group_resource_slots,
        'default_for_unspecified': DefaultForUnspecified.UNLIMITED
    }
    total_group_allowed = ResourceSlot.from_policy(group_resource_policy,
                                                   sched_ctx.known_slot_types)
    group_occupied = await sched_ctx.registry.get_group_occupancy(
        sess_ctx.group_id, conn=db_conn)
    log.debug('group:{} current-occupancy: {}', sess_ctx.group_id,
              group_occupied)
    log.debug('group:{} total-allowed: {}', sess_ctx.group_id,
              total_group_allowed)
    if not (group_occupied + sess_ctx.requested_slots <= total_group_allowed):
        return PredicateResult(
            False, "Your group resource quota is exceeded. ({})".format(
                ' '.join(f'{k}={v}'
                         for k, v in total_group_allowed.to_humanized(
                             sched_ctx.known_slot_types).items())))
    return PredicateResult(True)
Code example #3
async def check_keypair_resource_limit(
    db_conn: SAConnection,
    sched_ctx: SchedulingContext,
    sess_ctx: PendingSession,
) -> PredicateResult:
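    '''
    Check the keypair's resource policy: the per-session container
    count limit and the keypair's total resource quota.
    '''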
    query = (sa.select([
        keypair_resource_policies
    ]).select_from(keypair_resource_policies).where(
        keypair_resource_policies.c.name == sess_ctx.resource_policy))
    result = await db_conn.execute(query)
    resource_policy = await result.first()
    if len(sess_ctx.kernels) > resource_policy['max_containers_per_session']:
        return PredicateResult(
            False,
            f"You cannot create session with more than "
            f"{resource_policy['max_containers_per_session']} containers.",
            permanent=True,
        )
    total_keypair_allowed = ResourceSlot.from_policy(
        resource_policy, sched_ctx.known_slot_types)
    key_occupied = await sched_ctx.registry.get_keypair_occupancy(
        sess_ctx.access_key, conn=db_conn)
    log.debug('keypair:{} current-occupancy: {}', sess_ctx.access_key,
              key_occupied)
    log.debug('keypair:{} total-allowed: {}', sess_ctx.access_key,
              total_keypair_allowed)
    if not (key_occupied + sess_ctx.requested_slots <= total_keypair_allowed):
        return PredicateResult(
            False,
            "Your keypair resource quota is exceeded. ({})".format(' '.join(
                f'{k}={v}' for k, v in total_keypair_allowed.to_humanized(
                    sched_ctx.known_slot_types).items())),
        )
    return PredicateResult(True)
Code example #4
async def check_keypair_resource_limit(
    db_conn: SAConnection,
    sched_ctx: SchedulingContext,
    sess_ctx: PendingSession,
) -> PredicateResult:
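    '''
    Check that admitting the requested slots does not exceed the keypair's
    total resource quota; on failure, attach a callback that records the
    reason in the kernel's status_info.
    '''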
    query = (sa.select([
        keypair_resource_policies
    ]).select_from(keypair_resource_policies).where(
        keypair_resource_policies.c.name == sess_ctx.resource_policy))
    result = await db_conn.execute(query)
    resource_policy = await result.first()
    total_keypair_allowed = ResourceSlot.from_policy(
        resource_policy, sched_ctx.known_slot_types)
    key_occupied = await sched_ctx.registry.get_keypair_occupancy(
        sess_ctx.access_key, conn=db_conn)
    log.debug('keypair:{} current-occupancy: {}', sess_ctx.access_key,
              key_occupied)
    log.debug('keypair:{} total-allowed: {}', sess_ctx.access_key,
              total_keypair_allowed)
    if not (key_occupied + sess_ctx.requested_slots <= total_keypair_allowed):

        async def update_status_info(
            db_conn: SAConnection,
            sched_ctx: SchedulingContext,
            sess_ctx: PendingSession,
        ) -> None:
            query = (sa.update(kernels).values(
                status_info='out-of-resource (keypair resource quota exceeded)'
            ).where(kernels.c.id == sess_ctx.kernel_id))
            await db_conn.execute(query)

        return PredicateResult(
            False,
            'Your keypair resource quota is exceeded. ({})'.format(' '.join(
                f'{k}={v}' for k, v in total_keypair_allowed.to_humanized(
                    sched_ctx.known_slot_types).items())),
            failure_cb=update_status_info)
    return PredicateResult(True)
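
Code examples #1 through #4 all share the same predicate signature and return a PredicateResult; example #4 additionally attaches a failure_cb. The sketch below is an illustration only, not part of the original codebase: it shows how a scheduler could drive these predicates, and the attribute names `passed` and `failure_cb` on PredicateResult as well as the function name `run_resource_predicates` are assumptions.

async def run_resource_predicates(
    db_conn: SAConnection,
    sched_ctx: SchedulingContext,
    sess_ctx: PendingSession,
) -> bool:
    # Evaluate each resource-limit predicate for the pending session.
    results = [
        await check_domain_resource_limit(db_conn, sched_ctx, sess_ctx),
        await check_group_resource_limit(db_conn, sched_ctx, sess_ctx),
        await check_keypair_resource_limit(db_conn, sched_ctx, sess_ctx),
    ]
    if all(r.passed for r in results):
        return True
    # On failure, run any attached failure callbacks (e.g. the one in code
    # example #4 that records an out-of-resource status_info) and report
    # that the session must stay pending.
    for r in results:
        if not r.passed and getattr(r, 'failure_cb', None) is not None:
            await r.failure_cb(db_conn, sched_ctx, sess_ctx)
    return False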
Code example #5
async def check_presets(request: web.Request, params: Any) -> web.Response:
    '''
    Returns the list of all resource presets in the current scaling group,
    with additional information including allocatability of each preset,
    amount of total remaining resources, and the current keypair resource limits.
    '''
    try:
        access_key = request['keypair']['access_key']
        resource_policy = request['keypair']['resource_policy']
        domain_name = request['user']['domain_name']
        # TODO: uncomment when we implement scaling group.
        # scaling_group = request.query.get('scaling_group')
        # assert scaling_group is not None, 'scaling_group parameter is missing.'
    except (json.decoder.JSONDecodeError, AssertionError) as e:
        raise InvalidAPIParameters(extra_msg=str(e.args[0]))
    registry = request.app['registry']
    known_slot_types = await registry.config_server.get_resource_slots()
    resp: MutableMapping[str, Any] = {
        'keypair_limits': None,
        'keypair_using': None,
        'keypair_remaining': None,
        'scaling_group_remaining': None,
        'scaling_groups': None,
        'presets': [],
    }
    log.info('CHECK_PRESETS (ak:{}, g:{}, sg:{})',
             request['keypair']['access_key'], params['group'],
             params['scaling_group'])

    async with request.app['dbpool'].acquire() as conn, conn.begin():
        # Check keypair resource limit.
        keypair_limits = ResourceSlot.from_policy(resource_policy,
                                                  known_slot_types)
        keypair_occupied = await registry.get_keypair_occupancy(access_key,
                                                                conn=conn)
        keypair_remaining = keypair_limits - keypair_occupied

        # Check group resource limit and get group_id.
        j = sa.join(groups, association_groups_users,
                    association_groups_users.c.group_id == groups.c.id)
        query = (sa.select(
            [groups.c.id, groups.c.total_resource_slots]).select_from(j).where(
                (association_groups_users.c.user_id == request['user']['uuid'])
                & (groups.c.name == params['group'])
                & (domains.c.name == domain_name)))
        result = await conn.execute(query)
        row = await result.fetchone()
        if row is None:
            raise InvalidAPIParameters('Unknown user group')
        group_id = row.id
        group_resource_slots = row.total_resource_slots
        group_resource_policy = {
            'total_resource_slots': group_resource_slots,
            'default_for_unspecified': DefaultForUnspecified.UNLIMITED
        }
        group_limits = ResourceSlot.from_policy(group_resource_policy,
                                                known_slot_types)
        group_occupied = await registry.get_group_occupancy(group_id,
                                                            conn=conn)
        group_remaining = group_limits - group_occupied

        # Check domain resource limit.
        query = (sa.select([domains.c.total_resource_slots
                            ]).where(domains.c.name == domain_name))
        domain_resource_slots = await conn.scalar(query)
        domain_resource_policy = {
            'total_resource_slots': domain_resource_slots,
            'default_for_unspecified': DefaultForUnspecified.UNLIMITED
        }
        domain_limits = ResourceSlot.from_policy(domain_resource_policy,
                                                 known_slot_types)
        domain_occupied = await registry.get_domain_occupancy(domain_name,
                                                              conn=conn)
        domain_remaining = domain_limits - domain_occupied

        # Take minimum remaining resources. There's no need to merge limits and occupied.
        # For legacy compatibility, we just merge all remaining slots into `keypair_remaining`.
        for slot in known_slot_types:
            keypair_remaining[slot] = min(
                keypair_remaining[slot],
                group_remaining[slot],
                domain_remaining[slot],
            )

        # Prepare per scaling group resource.
        sgroups = await query_allowed_sgroups(conn, domain_name, group_id,
                                              access_key)
        sgroup_names = [sg.name for sg in sgroups]
        if params['scaling_group'] is not None:
            if params['scaling_group'] not in sgroup_names:
                raise InvalidAPIParameters('Unknown scaling group')
            sgroup_names = [params['scaling_group']]
        per_sgroup = {
            sgname: {
                'using':
                ResourceSlot({k: Decimal(0)
                              for k in known_slot_types.keys()}),
                'remaining':
                ResourceSlot({k: Decimal(0)
                              for k in known_slot_types.keys()}),
            }
            for sgname in sgroup_names
        }

        # Per-scaling-group resource usage from resource-occupying kernels.
        query = (sa.select([
            kernels.c.occupied_slots, kernels.c.scaling_group
        ]).select_from(kernels).where(
            (kernels.c.user_uuid == request['user']['uuid'])
            & (kernels.c.status.in_(AGENT_RESOURCE_OCCUPYING_KERNEL_STATUSES))
            & (kernels.c.scaling_group.in_(sgroup_names))))
        async for row in conn.execute(query):
            per_sgroup[row.scaling_group]['using'] += row.occupied_slots

        # Per-scaling-group remaining resources from agent stats.
        sgroup_remaining = ResourceSlot(
            {k: Decimal(0)
             for k in known_slot_types.keys()})
        query = (sa.select([
            agents.c.available_slots, agents.c.occupied_slots,
            agents.c.scaling_group
        ]).select_from(agents).where((agents.c.status == AgentStatus.ALIVE) & (
            agents.c.scaling_group.in_(sgroup_names))))
        agent_slots = []
        async for row in conn.execute(query):
            remaining = row['available_slots'] - row['occupied_slots']
            remaining += ResourceSlot(
                {k: Decimal(0)
                 for k in known_slot_types.keys()})
            sgroup_remaining += remaining
            agent_slots.append(remaining)
            per_sgroup[row.scaling_group]['remaining'] += remaining

        # Take maximum allocatable resources per sgroup.
        for sgname, sgfields in per_sgroup.items():
            for rtype, slots in sgfields.items():
                if rtype == 'remaining':
                    for slot in known_slot_types.keys():
                        if slot in slots:
                            slots[slot] = min(keypair_remaining[slot],
                                              slots[slot])
                per_sgroup[sgname][rtype] = slots.to_json(
                )  # type: ignore  # it's serialization
        for slot in known_slot_types.keys():
            sgroup_remaining[slot] = min(keypair_remaining[slot],
                                         sgroup_remaining[slot])

        # Fetch all resource presets in the current scaling group.
        query = (sa.select([resource_presets]).select_from(resource_presets))
        async for row in conn.execute(query):
            # Check if there is any agent that can allocate each preset.
            allocatable = False
            preset_slots = row['resource_slots'].normalize_slots(
                ignore_unknown=True)
            for agent_slot in agent_slots:
                if agent_slot >= preset_slots and keypair_remaining >= preset_slots:
                    allocatable = True
                    break
            resp['presets'].append({
                'name':
                row['name'],
                'resource_slots':
                preset_slots.to_json(),
                'shared_memory':
                str(row['shared_memory'])
                if row['shared_memory'] is not None else None,
                'allocatable':
                allocatable,
            })

        # Return group resource status as NaN if not allowed.
        group_resource_visibility = await request.app[
            'registry'].config_server.get(
                'config/api/resources/group_resource_visibility')
        group_resource_visibility = t.ToBool().check(group_resource_visibility)
        if not group_resource_visibility:
            group_limits = ResourceSlot(
                {k: Decimal('NaN')
                 for k in known_slot_types.keys()})
            group_occupied = ResourceSlot(
                {k: Decimal('NaN')
                 for k in known_slot_types.keys()})
            group_remaining = ResourceSlot(
                {k: Decimal('NaN')
                 for k in known_slot_types.keys()})

        resp['keypair_limits'] = keypair_limits.to_json()
        resp['keypair_using'] = keypair_occupied.to_json()
        resp['keypair_remaining'] = keypair_remaining.to_json()
        resp['group_limits'] = group_limits.to_json()
        resp['group_using'] = group_occupied.to_json()
        resp['group_remaining'] = group_remaining.to_json()
        resp['scaling_group_remaining'] = sgroup_remaining.to_json()
        resp['scaling_groups'] = per_sgroup
    return web.json_response(resp, status=200)
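
For reference, the JSON response assembled in code example #5 has the following shape, derived from the resp keys above (placeholder values only, not real data):

# {
#     'keypair_limits': {...}, 'keypair_using': {...}, 'keypair_remaining': {...},
#     'group_limits': {...}, 'group_using': {...}, 'group_remaining': {...},
#     'scaling_group_remaining': {...},
#     'scaling_groups': {'<sgroup name>': {'using': {...}, 'remaining': {...}}},
#     'presets': [{'name': ..., 'resource_slots': {...},
#                  'shared_memory': ..., 'allocatable': ...}],
# }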
Code example #6
async def check_presets(request: web.Request, params: Any) -> web.Response:
    '''
    Returns the list of all resource presets in the current scaling group,
    with additional information including allocatability of each preset,
    amount of total remaining resources, and the current keypair resource limits.
    '''
    try:
        access_key = request['keypair']['access_key']
        resource_policy = request['keypair']['resource_policy']
        # TODO: uncomment when we implement scaling group.
        # scaling_group = request.query.get('scaling_group')
        # assert scaling_group is not None, 'scaling_group parameter is missing.'
    except (json.decoder.JSONDecodeError, AssertionError) as e:
        raise InvalidAPIParameters(extra_msg=str(e.args[0]))
    registry = request.app['registry']
    known_slot_types = await registry.config_server.get_resource_slots()
    keypair_limits = ResourceSlot.from_policy(resource_policy,
                                              known_slot_types)
    resp: MutableMapping[str, Any] = {
        'keypair_limits': None,
        'keypair_using': None,
        'keypair_remaining': None,
        'scaling_group_remaining': None,
        'presets': [],
    }
    async with request.app['dbpool'].acquire() as conn, conn.begin():
        keypair_occupied = await registry.get_keypair_occupancy(access_key,
                                                                conn=conn)
        keypair_remaining = keypair_limits - keypair_occupied
        resp['keypair_limits'] = keypair_limits.to_json()
        resp['keypair_using'] = keypair_occupied.to_json()
        resp['keypair_remaining'] = keypair_remaining.to_json()
        # query all agents' capacity and occupancy
        agent_slots = []

        j = sa.join(groups, association_groups_users,
                    association_groups_users.c.group_id == groups.c.id)
        query = (sa.select(
            [association_groups_users.c.group_id]).select_from(j).where(
                (association_groups_users.c.user_id == request['user']['uuid'])
                & (groups.c.name == params['group'])))
        group_id = await conn.scalar(query)
        if group_id is None:
            raise InvalidAPIParameters('Unknown user group')

        sgroups = await query_allowed_sgroups(conn,
                                              request['user']['domain_name'],
                                              group_id, access_key)
        sgroups = [sg.name for sg in sgroups]
        if params['scaling_group'] is not None:
            if params['scaling_group'] not in sgroups:
                raise InvalidAPIParameters('Unknown scaling group')
            sgroups = [params['scaling_group']]

        sgroup_remaining = ResourceSlot(
            {k: Decimal(0)
             for k in known_slot_types.keys()})
        query = (sa.select([
            agents.c.available_slots, agents.c.occupied_slots
        ]).select_from(agents).where((agents.c.status == AgentStatus.ALIVE)
                                     & (agents.c.scaling_group.in_(sgroups))))
        async for row in conn.execute(query):
            remaining = row['available_slots'] - row['occupied_slots']
            sgroup_remaining += remaining
            agent_slots.append(remaining)
        resp['scaling_group_remaining'] = sgroup_remaining.to_json()
        # fetch all resource presets in the current scaling group.
        query = (sa.select([resource_presets]).select_from(resource_presets))
        async for row in conn.execute(query):
            # check if there is any agent that can allocate each preset
            allocatable = False
            preset_slots = row['resource_slots'].filter_slots(known_slot_types)
            for agent_slot in agent_slots:
                if agent_slot >= preset_slots and keypair_remaining >= preset_slots:
                    allocatable = True
                    break
            resp['presets'].append({
                'name': row['name'],
                'resource_slots': preset_slots.to_json(),
                'allocatable': allocatable,
            })
    return web.json_response(resp, status=200)