def test_request_bad_extra_args(self):
  """extra_args length cap (128 items) and mutual exclusion with command."""
  def put_req(command, extra_args):
    # Builds and stores a request with the given command/extra_args pair and
    # a valid isolated input.
    _gen_request(
        properties=_gen_properties(
            command=command,
            extra_args=extra_args,
            inputs_ref=task_request.FilesRef(
                isolated='deadbeefdeadbeefdeadbeefdeadbeefdeadbeef',
                isolatedserver='http://localhost:1',
                namespace='default-gzip'))).put()

  # 128 extra_args entries are accepted.
  put_req([], [u'python'] * 128)
  # 129 entries exceed the limit.
  with self.assertRaises(datastore_errors.BadValueError):
    put_req([], [u'python'] * 129)
  # extra_args cannot be set together with a command.
  with self.assertRaises(datastore_errors.BadValueError):
    put_req([u'python'], [u'python'])
def test_request_bad_inputs_ref(self):
  """Validation of FilesRef in properties.inputs_ref."""
  make_good_ref = lambda: task_request.FilesRef(
      isolated='deadbeefdeadbeefdeadbeefdeadbeefdeadbeef',
      isolatedserver='http://localhost:1',
      namespace='default-gzip')

  # Both command and inputs_ref.isolated together are accepted.
  _gen_request(
      properties=_gen_properties(
          command=['python'], inputs_ref=make_good_ref())).put()
  # A truncated digest is rejected at construction time.
  with self.assertRaises(datastore_errors.BadValueError):
    _gen_request(
        properties=_gen_properties(
            command=['see', 'spot', 'run'],
            inputs_ref=task_request.FilesRef(
                isolated='deadbeef',
                isolatedserver='http://localhost:1',
                namespace='default-gzip')))
  # inputs_ref without server/namespace fails on put().
  req = _gen_request(
      properties=_gen_properties(inputs_ref=task_request.FilesRef()))
  with self.assertRaises(datastore_errors.BadValueError):
    req.put()
  # No digest nor command (and an invalid namespace) is rejected.
  with self.assertRaises(datastore_errors.BadValueError):
    _gen_request(
        properties=_gen_properties(
            command=[],
            inputs_ref=task_request.FilesRef(
                isolatedserver='https://isolateserver.appspot.com',
                namespace='default-gzip^^^')))
  # Command and server can be skipped when a valid digest is provided.
  _gen_request(
      properties=_gen_properties(
          command=[], inputs_ref=make_good_ref())).put()
def apply_server_property_defaults(properties):
  """Fills ndb task properties with default values read from server settings.

  Mutates `properties` in place; only fields the request left unset are
  filled in ("or" short-circuits on already-set values).

  Args:
    properties: TaskProperties entity to update.
  """
  cfg = config.settings()
  if not cfg:
    return
  # Fix: the original fetched config.settings() a second time right here,
  # which was redundant work; `cfg` is already the settings object.
  if cfg.isolate.default_server and cfg.isolate.default_namespace:
    properties.inputs_ref = properties.inputs_ref or task_request.FilesRef()
    properties.inputs_ref.isolatedserver = (
        properties.inputs_ref.isolatedserver or cfg.isolate.default_server)
    properties.inputs_ref.namespace = (
        properties.inputs_ref.namespace or cfg.isolate.default_namespace)

  if cfg.HasField('cipd') and properties.cipd_input:
    properties.cipd_input.server = (
        properties.cipd_input.server or cfg.cipd.default_server)
    properties.cipd_input.client_package = (
        properties.cipd_input.client_package or task_request.CipdPackage())
    properties.cipd_input.client_package.package_name = (
        properties.cipd_input.client_package.package_name or
        cfg.cipd.default_client_package.package_name)
    properties.cipd_input.client_package.version = (
        properties.cipd_input.client_package.version or
        cfg.cipd.default_client_package.version)
def apply_server_property_defaults(properties):
  """Fills ndb task properties with default values read from server settings."""
  settings = config.settings()
  # TODO(iannucci): This was an artifact of the existing test harnesses;
  # get_pool_config raises on None, but the way it's mocked in
  # ./test_env_handlers.py allows `get_pool_config` to return None in this case.
  # This try/except will be cleaned up in a subsequent CL, once I remove these
  # default services from `config`.
  try:
    pool_cfg = pools_config.get_pool_config(properties.pool)
  except ValueError:
    pool_cfg = None
  if not settings and not pool_cfg:
    return

  # NOTE(review): if `settings` is falsy while `pool_cfg` is set, the
  # attribute reads below would fail; presumably config.settings() is always
  # truthy in production -- confirm.
  # Pool-level isolate defaults take precedence over global server settings.
  iso_server = settings.isolate.default_server
  iso_ns = settings.isolate.default_namespace
  if pool_cfg and pool_cfg.default_isolate:
    iso_server = pool_cfg.default_isolate.server
    iso_ns = pool_cfg.default_isolate.namespace

  if iso_server and iso_ns:
    # Only fill in the fields the request left unset.
    properties.inputs_ref = properties.inputs_ref or task_request.FilesRef()
    properties.inputs_ref.isolatedserver = (
        properties.inputs_ref.isolatedserver or iso_server)
    properties.inputs_ref.namespace = (
        properties.inputs_ref.namespace or iso_ns)

  # Pool-level CIPD defaults likewise override the global ones.
  cipd_server = settings.cipd.default_server
  cipd_vers = settings.cipd.default_client_package.version
  if pool_cfg and pool_cfg.default_cipd:
    cipd_server = pool_cfg.default_cipd.server
    cipd_vers = pool_cfg.default_cipd.client_version

  if cipd_server and properties.cipd_input:
    properties.cipd_input.server = (
        properties.cipd_input.server or cipd_server)
    properties.cipd_input.client_package = (
        properties.cipd_input.client_package or task_request.CipdPackage())
    # TODO(iannucci) - finish removing 'client_package' as a task-configurable
    # setting.
    # The client package name is forced; only its version remains overridable.
    properties.cipd_input.client_package.package_name = (
        'infra/tools/cipd/${platform}')
    properties.cipd_input.client_package.version = (
        properties.cipd_input.client_package.version or cipd_vers)
def _gen_properties(**kwargs):
  """Creates a TaskProperties, with **kwargs overriding the defaults."""
  props = dict(kwargs)
  props.setdefault(u'cipd_input', _gen_cipd_input())
  props.setdefault(u'command', [u'command1', u'arg1'])
  props.setdefault(
      u'dimensions',
      {
          u'OS': [u'Windows-3.1.1'],
          u'hostname': [u'localhost'],
          u'pool': [u'default'],
      })
  props.setdefault(u'env', {u'foo': u'bar', u'joe': u'2'})
  props.setdefault(u'env_prefixes', {u'PATH': [u'local/path']})
  props.setdefault(u'execution_timeout_secs', 30)
  props.setdefault(u'grace_period_secs', 30)
  props.setdefault(u'idempotent', False)
  props.setdefault(
      u'inputs_ref',
      task_request.FilesRef(
          isolatedserver=u'https://isolateserver.appspot.com',
          namespace=u'default-gzip'))
  props.setdefault(u'io_timeout_secs', None)
  props.setdefault(u'has_secret_bytes', u'secret_bytes' in kwargs)
  # The ndb model stores dimensions under the 'dimensions_data' attribute.
  props[u'dimensions_data'] = props.pop(u'dimensions')
  return task_request.TaskProperties(**props)
def post(self, task_id=None):
  """Handles a task state update reported by a bot.

  Decodes the bot-supplied payload (base64 output, isolated/CIPD stats, CIPD
  pins), forwards everything to task_scheduler.bot_update_task() and records
  a bot_management event named after the resulting task state.
  """
  # Unlike handshake and poll, we do not accept invalid keys here. This code
  # path is much more strict.
  request = self.parse_body()
  msg = log_unexpected_subset_keys(
      self.ACCEPTED_KEYS, self.REQUIRED_KEYS, request, self.request, 'bot',
      'keys')
  if msg:
    self.abort_with_error(400, error=msg)

  bot_id = request['id']
  task_id = request['task_id']

  machine_type = None
  bot_info = bot_management.get_info_key(bot_id).get()
  if bot_info:
    machine_type = bot_info.machine_type

  # Make sure bot self-reported ID matches the authentication token. Raises
  # auth.AuthorizationError if not.
  bot_auth.validate_bot_id_and_fetch_config(bot_id, machine_type)

  bot_overhead = request.get('bot_overhead')
  cipd_pins = request.get('cipd_pins')
  cipd_stats = request.get('cipd_stats')
  cost_usd = request.get('cost_usd', 0)
  duration = request.get('duration')
  exit_code = request.get('exit_code')
  hard_timeout = request.get('hard_timeout')
  io_timeout = request.get('io_timeout')
  isolated_stats = request.get('isolated_stats')
  output = request.get('output')
  output_chunk_start = request.get('output_chunk_start')
  outputs_ref = request.get('outputs_ref')

  # Performance stats only make sense when the bot measured its own overhead.
  if (isolated_stats or cipd_stats) and bot_overhead is None:
    ereporter2.log_request(
        request=self.request,
        source='server',
        category='task_failure',
        message='Failed to update task: %s' % task_id)
    self.abort_with_error(
        400,
        error='isolated_stats and cipd_stats require bot_overhead to be set'
              '\nbot_overhead: %s\nisolate_stats: %s' %
              (bot_overhead, isolated_stats))

  run_result_key = task_pack.unpack_run_result_key(task_id)
  performance_stats = None
  if bot_overhead is not None:
    performance_stats = task_result.PerformanceStats(
        bot_overhead=bot_overhead)
    if isolated_stats:
      download = isolated_stats.get('download') or {}
      upload = isolated_stats.get('upload') or {}

      # Returns the decoded value of d[k], or None if absent/empty; item
      # lists arrive base64-encoded from the bot.
      def unpack_base64(d, k):
        x = d.get(k)
        if x:
          return base64.b64decode(x)

      performance_stats.isolated_download = task_result.OperationStats(
          duration=download.get('duration'),
          initial_number_items=download.get('initial_number_items'),
          initial_size=download.get('initial_size'),
          items_cold=unpack_base64(download, 'items_cold'),
          items_hot=unpack_base64(download, 'items_hot'))
      performance_stats.isolated_upload = task_result.OperationStats(
          duration=upload.get('duration'),
          items_cold=unpack_base64(upload, 'items_cold'),
          items_hot=unpack_base64(upload, 'items_hot'))
    if cipd_stats:
      performance_stats.package_installation = task_result.OperationStats(
          duration=cipd_stats.get('duration'))

  if output is not None:
    try:
      output = base64.b64decode(output)
    except UnicodeEncodeError as e:
      logging.error('Failed to decode output\n%s\n%r', e, output)
      output = output.encode('ascii', 'replace')
    except TypeError as e:
      # Save the output as-is instead. The error will be logged in ereporter2
      # and returning a HTTP 500 would only force the bot to stay in a retry
      # loop.
      logging.error('Failed to decode output\n%s\n%r', e, output)
  if outputs_ref:
    outputs_ref = task_request.FilesRef(**outputs_ref)

  if cipd_pins:
    cipd_pins = task_result.CipdPins(
        client_package=task_request.CipdPackage(
            **cipd_pins['client_package']),
        packages=[
            task_request.CipdPackage(**args)
            for args in cipd_pins['packages']
        ])

  try:
    state = task_scheduler.bot_update_task(
        run_result_key=run_result_key,
        bot_id=bot_id,
        output=output,
        output_chunk_start=output_chunk_start,
        exit_code=exit_code,
        duration=duration,
        hard_timeout=hard_timeout,
        io_timeout=io_timeout,
        cost_usd=cost_usd,
        outputs_ref=outputs_ref,
        cipd_pins=cipd_pins,
        performance_stats=performance_stats)
    if not state:
      logging.info('Failed to update, please retry')
      self.abort_with_error(500, error='Failed to update, please retry')

    # Map the resulting task state to the bot event type recorded below.
    if state in (task_result.State.COMPLETED, task_result.State.TIMED_OUT):
      action = 'task_completed'
    elif state == task_result.State.KILLED:
      action = 'task_killed'
    else:
      assert state in (task_result.State.BOT_DIED,
                       task_result.State.RUNNING), state
      action = 'task_update'
    bot_management.bot_event(
        event_type=action,
        bot_id=bot_id,
        external_ip=self.request.remote_addr,
        authenticated_as=auth.get_peer_identity().to_bytes(),
        dimensions=None,
        state=None,
        version=None,
        quarantined=None,
        maintenance_msg=None,
        task_id=task_id,
        task_name=None)
  except ValueError as e:
    ereporter2.log_request(
        request=self.request,
        source='server',
        category='task_failure',
        message='Failed to update task: %s' % e)
    self.abort_with_error(400, error=str(e))
  except webob.exc.HTTPException:
    raise
  except Exception as e:
    logging.exception('Internal error: %s', e)
    self.abort_with_error(500, error=str(e))
  # 'must_stop' tells the bot to kill the task it is currently running.
  self.send_response({
      'must_stop': state == task_result.State.KILLED,
      'ok': True
  })
def test_bad_values(self):
  """Exercises validation of each TaskRequest/TaskProperties field in turn."""
  with self.assertRaises(AssertionError):
    mkreq(None)
  with self.assertRaises(AssertionError):
    mkreq({})
  with self.assertRaises(AttributeError):
    mkreq(_gen_request(properties={'foo': 'bar'}))
  mkreq(_gen_request())

  # Command.
  with self.assertRaises(datastore_errors.BadValueError):
    mkreq(_gen_request(properties=dict(command=[])))
  with self.assertRaises(datastore_errors.BadValueError):
    mkreq(_gen_request(properties=dict(command={'a': 'b'})))
  with self.assertRaises(datastore_errors.BadValueError):
    mkreq(_gen_request(properties=dict(command='python')))
  mkreq(_gen_request(properties=dict(command=['python'])))
  mkreq(_gen_request(properties=dict(command=[u'python'])))

  # CIPD packages.
  with self.assertRaises(datastore_errors.BadValueError):
    mkreq(_gen_request(properties=dict(packages=[{}])))
  with self.assertRaises(datastore_errors.BadValueError):
    mkreq(
        _gen_request(properties=dict(
            packages=[dict(package_name='rm')])))
  with self.assertRaises(datastore_errors.BadValueError):
    mkreq(
        _gen_request(properties=dict(packages=[{
            'package_name': 'infra|rm',
            'version': 'latest'
        }])))
  with self.assertRaises(datastore_errors.BadValueError):
    # Duplicate package.
    mkreq(
        _gen_request(properties=dict(packages=[
            {
                'package_name': 'rm',
                'version': 'latest'
            },
            {
                'package_name': 'rm',
                'version': 'canary'
            },
        ])))
  with self.assertRaises(datastore_errors.BadValueError):
    # Idempotent tasks cannot use CIPD packages.
    mkreq(
        _gen_request(properties=dict(idempotent=True, packages=[{
            'package_name': 'rm',
            'version': 'latest'
        }])))
  mkreq(
      _gen_request(properties=dict(packages=[{
          'package_name': 'rm',
          'version': 'latest'
      }])))

  # Dimensions.
  with self.assertRaises(TypeError):
    mkreq(_gen_request(properties=dict(dimensions=[])))
  with self.assertRaises(datastore_errors.BadValueError):
    mkreq(_gen_request(properties=dict(dimensions={})))
  with self.assertRaises(datastore_errors.BadValueError):
    mkreq(
        _gen_request(properties=dict(dimensions={
            u'id': u'b',
            u'a:': u'b'
        })))
  mkreq(
      _gen_request(properties=dict(dimensions={
          u'id': u'b',
          u'a.': u'b'
      })))

  # Environment.
  with self.assertRaises(TypeError):
    mkreq(_gen_request(properties=dict(env=[])))
  with self.assertRaises(TypeError):
    mkreq(_gen_request(properties=dict(env={u'a': 1})))
  mkreq(_gen_request(properties=dict(env={})))

  # Priority.
  with self.assertRaises(datastore_errors.BadValueError):
    mkreq(_gen_request(priority=task_request.MAXIMUM_PRIORITY + 1))
  mkreq(_gen_request(priority=task_request.MAXIMUM_PRIORITY))

  # Execution timeout.
  with self.assertRaises(datastore_errors.BadValueError):
    mkreq(
        _gen_request(properties=dict(
            execution_timeout_secs=task_request._ONE_DAY_SECS + 1)))
  mkreq(
      _gen_request(properties=dict(
          execution_timeout_secs=task_request._ONE_DAY_SECS)))

  # Expiration.
  now = utils.utcnow()
  with self.assertRaises(datastore_errors.BadValueError):
    mkreq(
        _gen_request(created_ts=now,
                     expiration_ts=now + datetime.timedelta(
                         seconds=task_request._MIN_TIMEOUT_SECS - 1)))
  with self.assertRaises(datastore_errors.BadValueError):
    mkreq(
        _gen_request(created_ts=now,
                     expiration_ts=now + datetime.timedelta(
                         seconds=task_request._SEVEN_DAYS_SECS + 1)))
  mkreq(
      _gen_request(
          created_ts=now,
          expiration_ts=now +
          datetime.timedelta(seconds=task_request._MIN_TIMEOUT_SECS)))
  mkreq(
      _gen_request(
          created_ts=now,
          expiration_ts=now +
          datetime.timedelta(seconds=task_request._SEVEN_DAYS_SECS)))

  # Try with isolated/isolatedserver/namespace.
  with self.assertRaises(datastore_errors.BadValueError):
    mkreq(
        _gen_request(
            properties=dict(command=['see', 'spot', 'run'],
                            inputs_ref=task_request.FilesRef())))
def post(self, task_id=None):
  """Handles a task state update reported by a bot.

  Decodes the bot-supplied payload (base64 output, isolated stats), forwards
  it to task_scheduler.bot_update_task() and records a bot_management event.
  """
  # Unlike handshake and poll, we do not accept invalid keys here. This code
  # path is much more strict.
  request = self.parse_body()
  msg = log_unexpected_subset_keys(
      self.ACCEPTED_KEYS, self.REQUIRED_KEYS, request, self.request, 'bot',
      'keys')
  if msg:
    self.abort_with_error(400, error=msg)

  bot_id = request['id']
  cost_usd = request['cost_usd']
  task_id = request['task_id']

  bot_overhead = request.get('bot_overhead')
  duration = request.get('duration')
  exit_code = request.get('exit_code')
  hard_timeout = request.get('hard_timeout')
  io_timeout = request.get('io_timeout')
  isolated_stats = request.get('isolated_stats')
  output = request.get('output')
  output_chunk_start = request.get('output_chunk_start')
  outputs_ref = request.get('outputs_ref')

  # isolated_stats and bot_overhead must be provided together (or neither).
  if bool(isolated_stats) != (bot_overhead is not None):
    ereporter2.log_request(
        request=self.request,
        source='server',
        category='task_failure',
        message='Failed to update task: %s' % task_id)
    self.abort_with_error(
        400,
        error='Both bot_overhead and isolated_stats must be set '
              'simultaneously\nbot_overhead: %s\nisolated_stats: %s' %
              (bot_overhead, isolated_stats))

  run_result_key = task_pack.unpack_run_result_key(task_id)
  performance_stats = None
  if isolated_stats:
    download = isolated_stats['download']
    upload = isolated_stats['upload']
    # Item lists arrive base64-encoded from the bot.
    performance_stats = task_result.PerformanceStats(
        bot_overhead=bot_overhead,
        isolated_download=task_result.IsolatedOperation(
            duration=download['duration'],
            initial_number_items=download['initial_number_items'],
            initial_size=download['initial_size'],
            items_cold=base64.b64decode(download['items_cold']),
            items_hot=base64.b64decode(download['items_hot'])),
        isolated_upload=task_result.IsolatedOperation(
            duration=upload['duration'],
            items_cold=base64.b64decode(upload['items_cold']),
            items_hot=base64.b64decode(upload['items_hot'])))

  if output is not None:
    try:
      output = base64.b64decode(output)
    except UnicodeEncodeError as e:
      logging.error('Failed to decode output\n%s\n%r', e, output)
      output = output.encode('ascii', 'replace')
    except TypeError as e:
      # Save the output as-is instead. The error will be logged in ereporter2
      # and returning a HTTP 500 would only force the bot to stay in a retry
      # loop.
      logging.error('Failed to decode output\n%s\n%r', e, output)
  if outputs_ref:
    outputs_ref = task_request.FilesRef(**outputs_ref)

  try:
    state = task_scheduler.bot_update_task(
        run_result_key=run_result_key,
        bot_id=bot_id,
        output=output,
        output_chunk_start=output_chunk_start,
        exit_code=exit_code,
        duration=duration,
        hard_timeout=hard_timeout,
        io_timeout=io_timeout,
        cost_usd=cost_usd,
        outputs_ref=outputs_ref,
        performance_stats=performance_stats)
    if not state:
      logging.info('Failed to update, please retry')
      self.abort_with_error(500, error='Failed to update, please retry')

    # Map the resulting task state to the bot event type recorded below.
    if state in (task_result.State.COMPLETED, task_result.State.TIMED_OUT):
      action = 'task_completed'
    else:
      assert state in (task_result.State.BOT_DIED,
                       task_result.State.RUNNING), state
      action = 'task_update'
    bot_management.bot_event(
        event_type=action,
        bot_id=bot_id,
        external_ip=self.request.remote_addr,
        dimensions=None,
        state=None,
        version=None,
        quarantined=None,
        task_id=task_id,
        task_name=None)
  except ValueError as e:
    ereporter2.log_request(
        request=self.request,
        source='server',
        category='task_failure',
        message='Failed to update task: %s' % e)
    self.abort_with_error(400, error=str(e))
  except webob.exc.HTTPException:
    raise
  except Exception as e:
    logging.exception('Internal error: %s', e)
    self.abort_with_error(500, error=str(e))
  # TODO(maruel): When a task is canceled, reply with 'DIE' so that the bot
  # reboots itself to abort the task abruptly. It is useful when a task hangs
  # and the timeout was set too long or the task was superseded by a newer
  # task with more recent executable (e.g. a new Try Server job on a newer
  # patchset on Rietveld).
  self.send_response({'ok': True})
def test_bad_values(self):
  """Exercises validation of each TaskRequest/TaskProperties field in turn."""
  with self.assertRaises(AssertionError):
    mkreq(None)
  with self.assertRaises(AssertionError):
    mkreq({})
  with self.assertRaises(AttributeError):
    mkreq(_gen_request(properties={'foo': 'bar'}))
  mkreq(_gen_request())

  # Command.
  with self.assertRaises(datastore_errors.BadValueError):
    mkreq(_gen_request(properties=dict(command=[])))
  with self.assertRaises(datastore_errors.BadValueError):
    mkreq(_gen_request(properties=dict(command={'a': 'b'})))
  with self.assertRaises(datastore_errors.BadValueError):
    mkreq(_gen_request(properties=dict(command='python')))
  mkreq(_gen_request(properties=dict(command=['python'])))
  mkreq(_gen_request(properties=dict(command=[u'python'])))

  # CIPD.
  # Helper that builds a request whose cipd_input is made of **cipd_input.
  def mkcipdreq(idempotent=False, **cipd_input):
    mkreq(_gen_request(
        properties=dict(idempotent=idempotent, cipd_input=cipd_input)))
  with self.assertRaises(datastore_errors.BadValueError):
    mkcipdreq(packages=[{}])
  with self.assertRaises(datastore_errors.BadValueError):
    mkcipdreq(packages=[
        dict(package_name='infra|rm', path='.', version='latest')])
  with self.assertRaises(datastore_errors.BadValueError):
    mkcipdreq(packages=[dict(package_name='rm', path='.')])
  with self.assertRaises(datastore_errors.BadValueError):
    mkcipdreq(packages=[dict(package_name='rm', version='latest')])
  with self.assertRaises(datastore_errors.BadValueError):
    mkcipdreq(packages=[dict(package_name='rm', path='/', version='latest')])
  with self.assertRaises(datastore_errors.BadValueError):
    mkcipdreq(packages=[dict(package_name='rm', path='/a', version='latest')])
  with self.assertRaises(datastore_errors.BadValueError):
    mkcipdreq(packages=[
        dict(package_name='rm', path='a/..', version='latest')])
  with self.assertRaises(datastore_errors.BadValueError):
    mkcipdreq(packages=[
        dict(package_name='rm', path='a/./b', version='latest')])
  with self.assertRaises(datastore_errors.BadValueError):
    # Duplicate package at the same path.
    mkcipdreq(packages=[
        dict(package_name='rm', path='.', version='latest'),
        dict(package_name='rm', path='.', version='canary'),
    ])
  with self.assertRaises(datastore_errors.BadValueError):
    # Idempotent tasks cannot use CIPD packages.
    mkcipdreq(
        idempotent=True,
        packages=[dict(package_name='rm', path='.', version='latest')])
  with self.assertRaises(datastore_errors.BadValueError):
    mkcipdreq(server='abc')
  with self.assertRaises(datastore_errors.BadValueError):
    mkcipdreq(client_package=dict(package_name='--bad package--'))
  mkcipdreq()
  mkcipdreq(packages=[dict(package_name='rm', path='.', version='latest')])
  mkcipdreq(
      client_package=dict(
          package_name='infra/tools/cipd/${platform}',
          version='git_revision:daedbeef',
      ),
      packages=[dict(package_name='rm', path='.', version='latest')],
      server='https://chrome-infra-packages.appspot.com',
  )

  # Named caches.
  mkcachereq = lambda *c: mkreq(_gen_request(properties=dict(caches=c)))
  with self.assertRaises(datastore_errors.BadValueError):
    mkcachereq(dict(name='', path='git_cache'))
  with self.assertRaises(datastore_errors.BadValueError):
    mkcachereq(dict(name='git_chromium', path=''))
  with self.assertRaises(datastore_errors.BadValueError):
    # Two caches mapped to the same path.
    mkcachereq(
        dict(name='git_chromium', path='git_cache'),
        dict(name='git_v8', path='git_cache'),
    )
  with self.assertRaises(datastore_errors.BadValueError):
    # Same cache name mapped twice.
    mkcachereq(
        dict(name='git_chromium', path='git_cache'),
        dict(name='git_chromium', path='git_cache2'),
    )
  with self.assertRaises(datastore_errors.BadValueError):
    mkcachereq(dict(name='git_chromium', path='/git_cache'))
  with self.assertRaises(datastore_errors.BadValueError):
    mkcachereq(dict(name='git_chromium', path='../git_cache'))
  with self.assertRaises(datastore_errors.BadValueError):
    mkcachereq(dict(name='git_chromium', path='git_cache/../../a'))
  with self.assertRaises(datastore_errors.BadValueError):
    mkcachereq(dict(name='git_chromium', path='../git_cache'))
  with self.assertRaises(datastore_errors.BadValueError):
    mkcachereq(dict(name='git_chromium', path='git_cache//a'))
  with self.assertRaises(datastore_errors.BadValueError):
    mkcachereq(dict(name='git_chromium', path='a/./git_cache'))
  with self.assertRaises(datastore_errors.BadValueError):
    mkcachereq(dict(name='has space', path='git_cache'))
  with self.assertRaises(datastore_errors.BadValueError):
    mkcachereq(dict(name='CAPITAL', path='git_cache'))
  with self.assertRaises(datastore_errors.BadValueError):
    # A CIPD package and named caches cannot be mapped to the same path.
    mkreq(_gen_request(properties=dict(
        caches=[dict(name='git_chromium', path='git_cache')],
        cipd_input=dict(packages=[
            dict(package_name='foo', path='git_cache', version='latest')]))))
  mkcachereq()
  mkcachereq(dict(name='git_chromium', path='git_cache'))
  mkcachereq(
      dict(name='git_chromium', path='git_cache'),
      dict(name='build_chromium', path='out'))

  # Dimensions.
  with self.assertRaises(TypeError):
    mkreq(_gen_request(properties=dict(dimensions=[])))
  with self.assertRaises(datastore_errors.BadValueError):
    mkreq(_gen_request(properties=dict(dimensions={})))
  with self.assertRaises(datastore_errors.BadValueError):
    mkreq(_gen_request(
        properties=dict(dimensions={u'id': u'b', u'a:': u'b'})))
  mkreq(_gen_request(
      properties=dict(dimensions={u'id': u'b', u'a.': u'b'})))

  # Environment.
  with self.assertRaises(TypeError):
    mkreq(_gen_request(properties=dict(env=[])))
  with self.assertRaises(TypeError):
    mkreq(_gen_request(properties=dict(env={u'a': 1})))
  mkreq(_gen_request(properties=dict(env={})))

  # Priority.
  with self.assertRaises(datastore_errors.BadValueError):
    mkreq(_gen_request(priority=task_request.MAXIMUM_PRIORITY+1))
  mkreq(_gen_request(priority=task_request.MAXIMUM_PRIORITY))

  # Execution timeout.
  with self.assertRaises(datastore_errors.BadValueError):
    mkreq(_gen_request(
        properties=dict(execution_timeout_secs=task_request._ONE_DAY_SECS+1)))
  mkreq(_gen_request(
      properties=dict(execution_timeout_secs=task_request._ONE_DAY_SECS)))

  # Expiration.
  now = utils.utcnow()
  with self.assertRaises(datastore_errors.BadValueError):
    mkreq(_gen_request(
        created_ts=now,
        expiration_ts=now + datetime.timedelta(
            seconds=task_request._MIN_TIMEOUT_SECS-1)))
  with self.assertRaises(datastore_errors.BadValueError):
    mkreq(_gen_request(
        created_ts=now,
        expiration_ts=
            now+datetime.timedelta(seconds=task_request._SEVEN_DAYS_SECS+1)))
  mkreq(_gen_request(
      created_ts=now,
      expiration_ts=
          now+datetime.timedelta(seconds=task_request._MIN_TIMEOUT_SECS)))
  mkreq(_gen_request(
      created_ts=now,
      expiration_ts=
          now + datetime.timedelta(seconds=task_request._SEVEN_DAYS_SECS)))

  # Try with isolated/isolatedserver/namespace.
  with self.assertRaises(datastore_errors.BadValueError):
    # Both command and inputs_ref.isolated.
    mkreq(_gen_request(properties=dict(
        command=['see', 'spot', 'run'],
        inputs_ref=task_request.FilesRef(
            isolated='deadbeef',
            isolatedserver='http://localhost:1',
            namespace='default-gzip'))))
  with self.assertRaises(datastore_errors.BadValueError):
    # inputs_ref without server/namespace.
    mkreq(_gen_request(properties=dict(inputs_ref=task_request.FilesRef())))
  with self.assertRaises(datastore_errors.BadValueError):
    mkreq(_gen_request(properties=dict(
        command=[],
        inputs_ref=task_request.FilesRef(
            isolatedserver='https://isolateserver.appspot.com',
            namespace='default-gzip^^^',
        ))))
  mkreq(_gen_request(properties=dict(
      command=[],
      inputs_ref=task_request.FilesRef(
          isolated='deadbeefdeadbeefdeadbeefdeadbeefdeadbeef',
          isolatedserver='http://localhost:1',
          namespace='default-gzip'))))
def run():
  """Updates the TaskRunResult and TaskResultSummary entities.

  Closure over the enclosing scope (run_result_key, result_summary_key,
  bot_id, exit_code, duration, output, cost_usd, now, ...).
  NOTE(review): presumably executed inside an ndb transaction by the caller
  -- confirm.

  Returns:
    tuple(run_result or None, task_completed bool, error message or None).
  """
  # 2 consecutive GETs, one PUT.
  run_result_future = run_result_key.get_async()
  result_summary_future = result_summary_key.get_async()
  run_result = run_result_future.get_result()
  if not run_result:
    # Wait on the outstanding future before bailing out.
    result_summary_future.wait()
    return None, False, 'is missing'
  if run_result.bot_id != bot_id:
    result_summary_future.wait()
    return None, False, 'expected bot (%s) but had update from bot %s' % (
        run_result.bot_id, bot_id)
  # This happens as an HTTP request is retried when the DB write succeeded but
  # it still returned HTTP 500.
  if len(run_result.exit_codes) and exit_code is not None:
    if run_result.exit_codes[0] != exit_code:
      result_summary_future.wait()
      return None, False, 'got 2 different exit_codes; %d then %d' % (
          run_result.exit_codes[0], exit_code)
  # duration and exit_code must be reported together.
  if (duration is None) != (exit_code is None):
    result_summary_future.wait()
    return None, False, (
        'had unexpected duration; expected iff a command completes; index %d'
        % len(run_result.exit_codes))
  if exit_code is not None:
    # The command completed.
    run_result.durations.append(duration)
    run_result.exit_codes.append(exit_code)
  if outputs_ref:
    run_result.outputs_ref = task_request.FilesRef(**outputs_ref)

  task_completed = len(run_result.exit_codes) == 1
  if run_result.state in task_result.State.STATES_RUNNING:
    # Timeouts take precedence over completion.
    if hard_timeout or io_timeout:
      run_result.state = task_result.State.TIMED_OUT
      run_result.completed_ts = now
    elif task_completed:
      run_result.state = task_result.State.COMPLETED
      run_result.completed_ts = now

  run_result.signal_server_version(server_version)
  to_put = [run_result]
  if output:
    # This does 1 multi GETs. This also modifies run_result in place.
    to_put.extend(
        run_result.append_output(0, output, output_chunk_start or 0))

  # Cost only ever grows; never let a stale report lower it.
  run_result.cost_usd = max(cost_usd, run_result.cost_usd or 0.)
  run_result.modified_ts = now

  result_summary = result_summary_future.get_result()
  if (result_summary.try_number and
      result_summary.try_number > run_result.try_number):
    # The situation where a shard is retried but the bot running the previous
    # try somehow reappears and reports success, the result must still show
    # the last try's result. We still need to update cost_usd manually.
    result_summary.costs_usd[run_result.try_number-1] = run_result.cost_usd
    result_summary.modified_ts = now
  else:
    result_summary.set_from_run_result(run_result, request)

  to_put.append(result_summary)
  ndb.put_multi(to_put)
  return run_result, task_completed, None