def get_cpu_shares(workload: Workload) -> int:
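    # Opportunistic workloads get their CPU shares scaled by a configurable factor;
    # all other workloads use the default shares scale.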
    if workload.is_opportunistic():
        opportunistic_shares_scale = get_config_manager().get_int(
            OPPORTUNISTIC_SHARES_SCALE_KEY, DEFAULT_OPPORTUNISTIC_SHARES_SCALE)
        return workload.get_thread_count() * opportunistic_shares_scale

    return workload.get_thread_count() * DEFAULT_SHARES_SCALE
Example #2
def get_workload_response(workload: Workload,
                          cpu: Cpu) -> Optional[WorkloadAllocateResponse]:
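    # Workloads with no threads assigned on this CPU get no response.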
    thread_ids = get_threads(cpu, workload.get_id())
    cpu_shares = get_cpu_shares(workload)
    cpu_quota = get_cpu_quota(workload)

    if len(thread_ids) < 1:
        return None

    memory_migrate = DEFAULT_TITUS_ISOLATE_MEMORY_MIGRATE
    memory_spread_page = DEFAULT_TITUS_ISOLATE_MEMORY_SPREAD_PAGE
    memory_spread_slab = DEFAULT_TITUS_ISOLATE_MEMORY_SPREAD_SLAB

    config_manager = get_config_manager()
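    # Override the memory placement defaults with cached config values when available.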
    if config_manager is not None:
        memory_migrate = config_manager.get_cached_bool(
            TITUS_ISOLATE_MEMORY_MIGRATE, DEFAULT_TITUS_ISOLATE_MEMORY_MIGRATE)
        memory_spread_page = config_manager.get_cached_bool(
            TITUS_ISOLATE_MEMORY_SPREAD_PAGE,
            DEFAULT_TITUS_ISOLATE_MEMORY_SPREAD_PAGE)
        memory_spread_slab = config_manager.get_cached_bool(
            TITUS_ISOLATE_MEMORY_SPREAD_SLAB,
            DEFAULT_TITUS_ISOLATE_MEMORY_SPREAD_SLAB)

    return WorkloadAllocateResponse(workload_id=workload.get_id(),
                                    thread_ids=thread_ids,
                                    cpu_shares=cpu_shares,
                                    cpu_quota=cpu_quota,
                                    memory_migrate=memory_migrate,
                                    memory_spread_page=memory_spread_page,
                                    memory_spread_slab=memory_spread_slab)
def assign_threads(workload: Workload) -> Cpu:
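    # Claim the first N threads on the CPU for this workload, where N covers
    # both the requested and opportunistic thread counts.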
    cpu = get_cpu()
    threads = cpu.get_threads()

    total_thread_count = (workload.get_thread_count() +
                          workload.get_opportunistic_thread_count())
    for i in range(total_thread_count):
        threads[i].claim(workload.get_id())

    return cpu
def get_workload_response(workload: Workload,
                          cpu: Cpu) -> Optional[WorkloadAllocateResponse]:
    thread_ids = get_threads(cpu, workload.get_id())
    cpu_shares = get_cpu_shares(workload)
    cpu_quota = get_cpu_quota(workload)

    if len(thread_ids) < 1:
        return None

    return WorkloadAllocateResponse(workload.get_id(), thread_ids, cpu_shares,
                                    cpu_quota)
Example #5
    def predict(self, workload: Workload,
                cpu_usage_last_hour: np.array,
                pred_env: PredEnvironment) -> float:
        if workload.get_id() == 'static_a':
            return workload.get_thread_count() * 0.8
        elif workload.get_id() == 'static_b':
            return workload.get_thread_count() * 0.01
        elif workload.get_id() == 'burst_c':
            return workload.get_thread_count() * 0.9
Example #6
    def predict(self, workload: Workload, cpu_usage_last_hour: np.array,
                pred_env: PredEnvironment) -> float:
        return workload.get_thread_count() * self.__constant_percent_busy / 100
def get_cpu_quota(workload: Workload) -> int:
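    # Burst workloads run unthrottled (-1 disables the CFS quota); everything
    # else gets a quota proportional to its thread count.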
    if workload.is_burst():
        return -1

    return workload.get_thread_count() * DEFAULT_QUOTA_SCALE
Example #8
def _occupies_entire_cpu(workload: Workload, cpu: Cpu) -> bool:
    return len(cpu.get_threads()) == workload.get_thread_count()
Example #9
def get_duration(workload: Workload, percentile: float) -> Optional[float]:
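    # Return the predicted duration for the requested percentile, or None if
    # no prediction exists at that percentile.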
    for p in workload.get_duration_predictions():
        if p.get_percentile() == percentile:
            return p.get_duration()

    return None
    def predict(self, workload: Workload, cpu_usage_last_hour: np.array,
                pred_env: PredEnvironment) -> float:
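        # Images are referenced as "<name>@sha256:<digest>"; split off the digest
        # and extract the bare image name from the repository path.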
        image = workload.get_image()
        tokens = image.split('@')
        valid_digest = False
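        # With no usage history, fall back to a 60-sample window of NaNs so the
        # time-series features are still well-formed.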
        if cpu_usage_last_hour is None:
            cpu_usage_last_hour = np.full((60, ), np.nan, dtype=np.float32)
        image_name = None
        if len(tokens) == 2 and tokens[-1].startswith("sha256:"):
            m = self._img_name_regex.search(tokens[0])
            if m is not None:
                valid_digest = True
                image_name = m.groups(0)[0]
                entry_point = workload.get_entrypoint()[:1000]
                filter_key = "%s@%s" % (tokens[-1], entry_point)
        if self.__use_whitelist and valid_digest and (
                filter_key not in self.__model.filter):
            # not in whitelist, predict without context features
            q = Query2(
                None,  # image_name
                None,  # user
                None,  # app_name
                workload.get_thread_count(),
                None,  # ram_requested
                None,  # disk_requested
                None,  # network_requested
                None,  # job_type
                None,  # region
                None,  # env
                None,  # hour of day
                build_ts_features(cpu_usage_last_hour))
        else:
            q = Query2(image_name, workload.get_owner_email(),
                       workload.get_app_name(), workload.get_thread_count(),
                       workload.get_mem(), workload.get_disk(),
                       workload.get_network(),
                       workload.get_job_type().lower(), pred_env.region,
                       pred_env.nflx_env, pred_env.hour_of_day,
                       build_ts_features(cpu_usage_last_hour))

        # Cap the prediction at the workload's requested thread count.
        return min(self.__model.ml_model.predict_single(q),
                   workload.get_thread_count())