def test__query_time_range(create_jobs):
    """Query jobs by completion/creation time ranges and verify the counts."""
    salt, respoolID = create_jobs[0], create_jobs[1]

    # Build a window covering the last day: [yesterday, now].
    start = timestamp_pb2.Timestamp()
    end = timestamp_pb2.Timestamp()
    start.FromDatetime(datetime.today() - timedelta(days=1))
    end.GetCurrentTime()
    last_day = peloton.TimeRange(min=start, max=end)

    # Keyword query for jobs COMPLETED within the last day. Only SUCCEEDED
    # and FAILED jobs carry a completion time, NUM_JOBS_PER_STATE of each.
    resp = query_by_spec(
        respoolID,
        spec=job_pb2.QuerySpec(keywords=[salt], completionTimeRange=last_day),
    )
    assert len(resp.results) == 2 * NUM_JOBS_PER_STATE

    # Keyword query for jobs CREATED within the last day. RUNNING, SUCCEEDED
    # and FAILED jobs all have a creation time.
    resp = query_by_spec(
        respoolID,
        spec=job_pb2.QuerySpec(keywords=[salt], creationTimeRange=last_day),
    )
    assert len(resp.results) == 3 * NUM_JOBS_PER_STATE

    # An inverted range (min after max) must be rejected with an error
    # in the response.
    inverted = peloton.TimeRange(max=start, min=end)
    resp = query_by_spec(
        respoolID,
        spec=job_pb2.QuerySpec(keywords=[salt], creationTimeRange=inverted),
    )
    assert resp.HasField('error')
def test__query_job_negative(create_jobs): respoolID = create_jobs[1] # query by name and label spec_by_name = job_pb2.QuerySpec( name='deadbeef', ) resp = query_by_spec(respoolID, spec_by_name) assert len(resp.results) == 0
def test__query_job_by_name(create_jobs):
    """Query by exact job name; every job per state shares the same name."""
    running = create_jobs[2]['RUNNING']
    name_spec = job_pb2.QuerySpec(name=running[0].get_info().config.name)
    # NOTE(review): unlike the sibling tests, no respoolID is passed here —
    # presumably query_by_spec defaults the pool; confirm this is intended.
    resp = query_by_spec(spec=name_spec)
    assert len(resp.results) == NUM_JOBS_PER_STATE
def test__query_job_by_keyword(create_jobs):
    """Keyword search on a running job's name returns all same-named jobs."""
    pool = create_jobs[1]
    running = create_jobs[2]['RUNNING']
    keyword = running[0].get_info().config.name
    resp = query_by_spec(pool, job_pb2.QuerySpec(keywords=[keyword]))
    assert len(resp.results) == NUM_JOBS_PER_STATE
def test__query_pagination(create_jobs, peloton_client):
    """Exercise the offset/limit/maxLimit interplay of paginated queries."""
    salt, pool = create_jobs[0], create_jobs[1]

    def run_query(pagination):
        # Rebuild the spec each time so the mutated pagination is picked up.
        spec = job_pb2.QuerySpec(keywords=[salt], pagination=pagination)
        return query_by_spec(peloton_client, pool, spec=spec)

    page = query.PaginationSpec(offset=0, limit=2, maxLimit=5)
    assert len(run_query(page).results) == 2

    # Capping maxLimit at the limit still yields `limit` rows.
    page.maxLimit = 2
    assert len(run_query(page).results) == 2

    # Advancing the offset within a maxLimit of 2 leaves a single row.
    page.offset = 1
    assert len(run_query(page).results) == 1
def test__query_job_by_owner_by_name(create_jobs):
    """Query by owner AND name together; expect one match per job per state."""
    pool = create_jobs[1]
    # Fetch the config once; owner and name come from the same running job.
    cfg = create_jobs[2]['RUNNING'][0].get_info().config
    spec = job_pb2.QuerySpec(owner=cfg.owningTeam, name=cfg.name)
    resp = query_by_spec(pool, spec)
    assert len(resp.results) == NUM_JOBS_PER_STATE
def test__query_job_by_owner(create_jobs):
    """Query by owner alone; all created jobs belong to the same team."""
    pool = create_jobs[1]
    owner = create_jobs[2]['RUNNING'][0].get_info().config.owningTeam
    resp = query_by_spec(pool, job_pb2.QuerySpec(owner=owner))
    # NUM_JOBS_PER_STATE jobs exist for each of the three states
    # (RUNNING, FAILED, SUCCEEDED) under this owner.
    assert len(resp.results) == 3 * NUM_JOBS_PER_STATE
def test__query_job_by_name_by_label(create_jobs):
    """Query combining an exact job name with a label match."""
    pool = create_jobs[1]
    job_name = create_jobs[2]["RUNNING"][0].get_info().config.name
    label = peloton.Label(key="testKey0", value="testVal0")
    spec = job_pb2.QuerySpec(name=job_name, labels=[label])
    resp = query_by_spec(pool, spec)
    assert len(resp.results) == NUM_JOBS_PER_STATE
def get_jobs_by_label(self, label, name, job_states):
    """Return the ids of jobs matching a cluster label, module name and states.

    Issues a job Query RPC scoped to this object's resource pool, filtering
    on the ``cluster_name`` and ``module_name`` labels and the given states.

    :param label: value for the ``cluster_name`` label
    :param name: value for the ``module_name`` label
    :param job_states: the job states to filter on
    :type label: str
    :type name: str
    :type job_states: dict
    :rtype: list of job id strings
    """
    request = job.QueryRequest(
        respoolID=peloton.ResourcePoolID(value=self.respool_id),
        spec=job.QuerySpec(
            # Cap results at 100; callers with more jobs would need paging.
            pagination=query.PaginationSpec(
                offset=0,
                limit=100,
            ),
            labels=[
                peloton.Label(
                    key='cluster_name',
                    value=label,
                ),
                peloton.Label(
                    key='module_name',
                    value=name,
                ),
            ],
            jobStates=job_states,
        ),
    )
    try:
        records = self.client.job_svc.Query(
            request,
            metadata=self.client.jobmgr_metadata,
            timeout=default_timeout,
        ).records
        return [record.id.value for record in records]
    # Fix: the original used `except Exception, e:`, which is Python-2-only
    # syntax and a SyntaxError on Python 3; `as` works on both.
    except Exception as e:
        print_fail('Exception calling Get job :%s' % str(e))
        raise
def test__query_completed_jobs(create_jobs):
    """Query terminal-state jobs ordered by completion time, newest first."""
    salt, pool = create_jobs[0], create_jobs[1]
    failed_jobs = create_jobs[2]['FAILED']

    # Job names look like "TestJob-<6 letter salt>-<count>"; the salt keyword
    # restricts the query to jobs created by this test session.
    terminal_states = [
        job_pb2.JobState.Value('SUCCEEDED'),
        job_pb2.JobState.Value('KILLED'),
        job_pb2.JobState.Value('FAILED'),
    ]
    newest_done_first = query.OrderBy(
        order=query.OrderBy.Order.Value('DESC'),
        property=query.PropertyPath(value='completion_time'),
    )
    spec = job_pb2.QuerySpec(
        keywords=[salt],
        jobStates=terminal_states,
        pagination=query.PaginationSpec(
            offset=0,
            limit=500,
            maxLimit=1000,
            orderBy=[newest_done_first],
        ),
    )
    resp = query_by_spec(pool, spec=spec)

    # Only SUCCEEDED and FAILED jobs were created, NUM_JOBS_PER_STATE each;
    # no KILLED jobs exist to match.
    assert len(resp.results) == 2 * NUM_JOBS_PER_STATE

    # Descending completion order means the job that finished last comes
    # first — here, the last job in the failed-jobs list.
    expected_name = failed_jobs[NUM_JOBS_PER_STATE - 1].get_info().config.name
    assert resp.results[0].name == expected_name
def test__query_job_negative(create_jobs, peloton_client):
    """A query for a name no job has must come back empty."""
    # NOTE(review): an earlier function in this file also named
    # `test__query_job_negative` (without `peloton_client`) is shadowed by
    # this definition — confirm the duplication is intentional.
    pool = create_jobs[1]
    resp = query_by_spec(
        peloton_client, pool, job_pb2.QuerySpec(name="deadbeef")
    )
    assert len(resp.results) == 0