def test_create_pool_config(self):
    """create_pool_config must mirror a hand-built ResourcePoolConfig.

    Builds the expected config explicitly (root-parented, limit ==
    reservation for each resource kind) and compares it against the
    helper's output.
    """
    # NOTE(review): appears to duplicate another test_create_pool_config
    # in this file — confirm whether both are intended.
    resources = [
        respool.ResourceConfig(kind="cpu", reservation=1.0, limit=1.0, share=1),
        respool.ResourceConfig(kind="memory", reservation=1024, limit=1024, share=1),
        respool.ResourceConfig(kind="disk", reservation=1024, limit=1024, share=1),
    ]
    expected = respool.ResourcePoolConfig(
        name="test_respool",
        resources=resources,
        parent=peloton.ResourcePoolID(value="root"),
    )
    actual = create_pool_config(
        name="test_respool", cpu=1.0, memory=1024, disk=1024)
    self.assertEqual(actual, expected)
def get_jobs_by_label(self, label, name, job_states):
    """
    Queries the job service for jobs tagged with the given cluster/module
    labels and returns the IDs of the matching jobs.

    :param label: the label value of the job (matched as ``cluster_name``)
    :param name: the name of the job (matched as ``module_name``)
    :param job_states: the job states to filter the query by
    :type label: str
    :type name: str
    :type job_states: list
    :rtype: list[str] -- the matching job IDs (fixed: previously documented
        as ``Response``, but the method extracts and returns the id values)
    """
    request = job.QueryRequest(
        respoolID=peloton.ResourcePoolID(value=self.respool_id),
        spec=job.QuerySpec(
            # Single page of up to 100 results; callers wanting more must
            # page explicitly.
            pagination=query.PaginationSpec(offset=0, limit=100),
            labels=[
                peloton.Label(key="cluster_name", value=label),
                peloton.Label(key="module_name", value=name),
            ],
            jobStates=job_states,
        ),
    )
    try:
        records = self.client.job_svc.Query(
            request,
            metadata=self.client.jobmgr_metadata,
            timeout=default_timeout,
        ).records
        ids = [record.id.value for record in records]
        return ids
    except Exception as e:
        # Log for operator visibility, then re-raise so callers still see
        # the original failure.
        print_fail("Exception calling Get job :%s" % str(e))
        raise
def create_pool_config(name, cpu, memory, disk):
    """
    Builds a ResourcePoolConfig parented at the root pool, with each
    resource's limit set equal to its reservation (no elastic headroom)
    and an equal share of 1.

    (Fixed: docstring fields were missing their leading colons --
    ``type name:`` instead of ``:type name:`` -- so doc tooling could
    not render them.)

    :param name: name of the resource pool
    :param cpu: cpu reservation and limit
    :param memory: memory reservation and limit
    :param disk: disk reservation and limit
    :type name: str
    :type cpu: float
    :type memory: float
    :type disk: float
    :rtype: respool.ResourcePoolConfig
    """
    return respool.ResourcePoolConfig(
        name=name,
        resources=[
            respool.ResourceConfig(
                kind='cpu',
                reservation=cpu,
                limit=cpu,
                share=1,
            ),
            respool.ResourceConfig(
                kind='memory',
                reservation=memory,
                limit=memory,
                share=1,
            ),
            respool.ResourceConfig(
                kind='disk',
                reservation=disk,
                limit=disk,
                share=1,
            ),
        ],
        parent=peloton.ResourcePoolID(value='root')
    )
def test_create_pool_config(self):
    """Verify create_pool_config builds a root-parented config whose
    per-kind limit equals its reservation, with share 1 for each kind."""
    kind_values = [('cpu', 1.0), ('memory', 1024), ('disk', 1024)]
    expected = respool.ResourcePoolConfig(
        name='test_respool',
        resources=[
            respool.ResourceConfig(
                kind=kind,
                reservation=value,
                limit=value,
                share=1,
            )
            for kind, value in kind_values
        ],
        parent=peloton.ResourcePoolID(value='root'),
    )
    self.assertEqual(
        create_pool_config(
            name='test_respool',
            cpu=1.0,
            memory=1024,
            disk=1024,
        ),
        expected,
    )
def get_job_config_spec(self, label, name, num_instance, default_task_config, instance_config=None, **extra):
    """
    Creates a job.JobConfig object

    (Fixed: docstring documented a ``respool_id`` parameter that is not in
    the signature -- the resource pool ID is read from ``self.respool_id``;
    also added the missing :rtype:.)

    :param label: the label value of the job
    :param name: the name of the job
    :param num_instance: the number of instance of the job
    :param default_task_config: the default task config of the job
    :param instance_config: instance specific task config
    :param extra: extra information of the job; recognized keys are
        ``job_type``, ``owningTeam``, ``description`` and ``change_log``
    :type label: str
    :type name: str
    :type num_instance: int
    :type default_task_config: task.TaskConfig
    :type instance_config: dict<int, task.TaskConfig>
    :type extra: dict
    :rtype: job.JobConfig
    """
    return job.JobConfig(
        name=name,
        type=extra.get('job_type', job.SERVICE),
        labels=[
            peloton.Label(
                key='cluster_name',
                value=label,
            ),
            peloton.Label(
                key='module_name',
                value=name,
            ),
        ],
        owningTeam=extra.get('owningTeam', 'compute'),
        description=extra.get('description', 'compute task'),
        instanceCount=num_instance,
        defaultConfig=default_task_config,
        instanceConfig=instance_config,
        # sla is required by resmgr
        sla=job.SlaConfig(
            priority=1,
            preemptible=True,
        ),
        respoolID=peloton.ResourcePoolID(value=self.respool_id),
        changeLog=extra.get('change_log', None))
def pool_info(self):
    """
    Fetches this pool's info from the resource-pool service.

    Requires ``self.id`` to be set; asserts the response carries no
    ``error`` field before returning.

    :return: the resource pool info
    """
    assert self.id, "No resource pool ID defined"
    pool_id = peloton.ResourcePoolID(value=self.id)
    response = self.client.respool_svc.GetResourcePool(
        respool.GetRequest(id=pool_id),
        metadata=self.client.resmgr_metadata,
        timeout=self.config.rpc_timeout_sec,
    )
    assert not response.HasField("error"), response
    return response.poolinfo
def create_job(self, instance_num, use_instance_config, sleep_time):
    """
    Creates a non-preemptible sleep job with ``instance_num`` instances,
    records its ID on ``self.job_id``, and returns that ID.

    (Added the missing docstring; ``range(0, n)`` tightened to the
    idiomatic ``range(n)`` -- no behavior change.)

    :param instance_num: number of task instances in the job
    :param use_instance_config: when truthy, attach a distinct per-instance
        task config for every instance
    :param sleep_time: sleep duration passed to each pod config
    :type instance_num: int
    :type use_instance_config: bool
    :rtype: str -- the created job's ID
    """
    default_config = self.create_pod_config(sleep_time, 'static')
    instance_config = {}
    if use_instance_config:
        for i in range(instance_num):
            instance_config[i] = self.create_pod_config(
                sleep_time, 'instance %s' % i)
    request = job.CreateRequest(
        config=job.JobConfig(
            name='instance %s && sleep %s && instance config %s' % (instance_num, sleep_time, use_instance_config),
            labels=[
                peloton.Label(
                    key='task_num',
                    value=str(instance_num),
                ),
                peloton.Label(
                    key='sleep_time',
                    value=str(sleep_time),
                ),
                peloton.Label(
                    key='use_instance_config',
                    value=str(use_instance_config),
                ),
            ],
            owningTeam='compute',
            description='test job',
            instanceCount=instance_num,
            defaultConfig=default_config,
            instanceConfig=instance_config,
            # sla is required by resmgr
            sla=job.SlaConfig(
                priority=1,
                preemptible=False,
            ),
            respoolID=peloton.ResourcePoolID(value=self.respool_id),
        ),
    )
    resp = self.client.job_svc.Create(
        request,
        metadata=self.client.jobmgr_metadata,
        timeout=default_timeout,
    )
    self.job_id = resp.jobId.value
    return resp.jobId.value