def __init__(self):
    # Serialize 'read_value' tasks once they have moved past 'pending',
    # i.e. in any active or terminal state.
    past_pending = evaluators.TaskStatusIn(
        {'ongoing', 'failed', 'completed', 'cancelled'})
    super(Serializer, self).__init__(
        predicate=evaluators.All(
            evaluators.TaskTypeEq('read_value'),
            past_pending,
        ),
        delegate=ResultSerializer)
def __init__(self, job):
    # Handle 'run_test' tasks by dispatching on the incoming event type.

    def _initiate_if_not_started():
        # Only initiate a task that has not already started or reached a
        # terminal state; a fresh guarded evaluator per call site, matching
        # the original wiring which builds one for each dispatch entry.
        return evaluators.FilteringEvaluator(
            predicate=evaluators.Not(
                evaluators.TaskStatusIn({'ongoing', 'failed', 'completed'})),
            delegate=InitiateEvaluator(job))

    update_if_ongoing = evaluators.FilteringEvaluator(
        predicate=evaluators.TaskStatusIn({'ongoing'}),
        delegate=UpdateEvaluator(job))
    dispatcher = evaluators.DispatchByEventTypeEvaluator({
        'initiate': _initiate_if_not_started(),
        # For updates, we want to ensure that the initiate evaluator
        # has a chance to run on 'pending' tasks.
        'update': evaluators.SequenceEvaluator(
            [_initiate_if_not_started(), update_if_ongoing]),
    })
    super(Evaluator, self).__init__(evaluators=(
        evaluators.FilteringEvaluator(
            predicate=evaluators.All(evaluators.TaskTypeEq('run_test')),
            delegate=dispatcher),
        evaluators.TaskPayloadLiftingEvaluator(),
    ))
def __init__(self, job):
    # For 'read_value' tasks still in 'pending', first lift the task payload
    # then perform the actual value read.
    handler = evaluators.SequenceEvaluator(evaluators=(
        evaluators.TaskPayloadLiftingEvaluator(),
        ReadValueEvaluator(job),
    ))
    super(Evaluator, self).__init__(
        predicate=evaluators.All(
            evaluators.TaskTypeEq('read_value'),
            evaluators.TaskStatusIn({'pending'})),
        delegate=handler)
def __init__(self):
    # Serialize 'find_culprit' tasks once they have moved past 'pending',
    # i.e. in any active or terminal state.
    past_pending = evaluators.TaskStatusIn(
        {'ongoing', 'failed', 'completed', 'cancelled'})
    super(Serializer, self).__init__(
        predicate=evaluators.All(
            evaluators.TaskTypeEq('find_culprit'),
            past_pending,
        ),
        delegate=AnalysisSerializer)
def CompoundEvaluatorForTesting(self, fake_evaluator):
    """Build a bisection evaluator where fake_evaluator stands in for reads.

    Pending 'read_value' tasks are handled by fake_evaluator (followed by
    payload lifting); everything then flows through the performance
    bisection evaluator for this job.
    """
    fake_read_value_stage = evaluators.FilteringEvaluator(
        predicate=evaluators.All(
            evaluators.TaskTypeEq('read_value'),
            evaluators.TaskStatusIn({'pending'})),
        delegate=evaluators.SequenceEvaluator(
            [fake_evaluator, evaluators.TaskPayloadLiftingEvaluator()]))
    bisection_stage = evaluators.SequenceEvaluator([
        performance_bisection.Evaluator(self.job),
        evaluators.TaskPayloadLiftingEvaluator(exclude_keys={'commits'}),
    ])
    return evaluators.SequenceEvaluator(
        [fake_read_value_stage, bisection_stage])
def __init__(self, job):
    # React only to 'find_isolate' tasks that are the target of the current
    # event and have not already reached a terminal state.
    actionable = evaluators.All(
        evaluators.TaskTypeEq('find_isolate'),
        evaluators.TaskIsEventTarget(),
        evaluators.Not(
            evaluators.TaskStatusIn({'completed', 'failed', 'cancelled'})),
    )
    super(Evaluator, self).__init__(evaluators=(
        evaluators.TaskPayloadLiftingEvaluator(),
        evaluators.FilteringEvaluator(
            predicate=actionable,
            delegate=evaluators.DispatchByEventTypeEvaluator({
                'initiate': InitiateEvaluator(job),
                'update': UpdateEvaluator(job),
            })),
    ))
def BisectionEvaluatorForTesting(self, *seeded_evaluators):
    """Creates an evaluator for bisection with the provided evaluators.

    This is a utility function for creating a bisection evaluator which has
    the list of evaluators nested within the sequence of evaluators.
    """
    # Pending tasks run the seeded evaluators first, then payload lifting.
    seeded_stage = evaluators.FilteringEvaluator(
        predicate=evaluators.All(evaluators.TaskStatusIn({'pending'})),
        delegate=evaluators.SequenceEvaluator(
            list(seeded_evaluators)
            + [evaluators.TaskPayloadLiftingEvaluator()]))
    bisection_stage = evaluators.SequenceEvaluator([
        performance_bisection.Evaluator(self.job),
        evaluators.TaskPayloadLiftingEvaluator(exclude_keys={'commits'}),
    ])
    return evaluators.SequenceEvaluator([seeded_stage, bisection_stage])
def __init__(self, job):
    # Handle 'run_test' tasks addressed by the current event, dispatching
    # on the event type.
    on_initiate = evaluators.FilteringEvaluator(
        # Only initiate tasks that have not already started or finished.
        predicate=evaluators.Not(
            evaluators.TaskStatusIn({'ongoing', 'failed', 'completed'})),
        delegate=InitiateEvaluator(job))
    on_update = evaluators.FilteringEvaluator(
        # Only update tasks that are currently in flight.
        predicate=evaluators.TaskStatusIn({'ongoing'}),
        delegate=UpdateEvaluator(job))
    super(Evaluator, self).__init__(evaluators=(
        evaluators.TaskPayloadLiftingEvaluator(),
        evaluators.FilteringEvaluator(
            predicate=evaluators.All(
                evaluators.TaskTypeEq('run_test'),
                evaluators.TaskIsEventTarget(),
            ),
            delegate=evaluators.DispatchByEventTypeEvaluator({
                'initiate': on_initiate,
                'update': on_update,
            })),
    ))
def __init__(self, job):
    # Run culprit finding on 'find_culprit' tasks that have not yet
    # reached a terminal state.
    not_finished = evaluators.Not(
        evaluators.TaskStatusIn({'completed', 'failed'}))
    super(Evaluator, self).__init__(
        predicate=evaluators.All(
            evaluators.TaskTypeEq('find_culprit'),
            not_finished),
        delegate=FindCulprit(job))