def get_create_time_order_build_id_range(self):
  """Returns low/high build id range for results ordered by creation time.

  Low boundary is inclusive. High boundary is exclusive.
  Assumes self is valid.
  """
  # An explicitly requested build id range takes precedence; otherwise
  # derive the id range from the creation-time bounds.
  has_explicit_ids = self.build_low is not None or self.build_high is not None
  if has_explicit_ids:
    return (self.build_low, self.build_high)
  return model.build_id_range(self.create_time_low, self.create_time_high)
def test_build_id_range(self):
  """Checks build_id_range brackets exactly the ids whose time segment
  lies in [time_low, time_high), for both extreme id suffixes."""
  time_low = datetime.datetime(2015, 1, 1)
  time_high = time_low + datetime.timedelta(seconds=10)
  id_low, id_high = model.build_id_range(time_low, time_high)
  step = model._TIME_RESOLUTION
  all_ones = (1 << model._BUILD_ID_SUFFIX_LEN) - 1

  def covered(t, suffix):
    # A build id is the time segment OR'ed with a suffix.
    return id_low <= (model._id_time_segment(t) | suffix) < id_high

  for suffix in (0, all_ones):
    self.assertFalse(covered(time_low - step, suffix))
    self.assertTrue(covered(time_low, suffix))
    self.assertTrue(covered(time_low + step, suffix))
    self.assertTrue(covered(time_high - step, suffix))
    self.assertFalse(covered(time_high, suffix))
    self.assertFalse(covered(time_high + step, suffix))
def do(self, payload):
  """Partitions the whole relevant build-id space into fixed-size segments
  and enqueues one task per segment, each carrying `proc` from the payload.
  """
  proc = payload['proc']
  now = utils.utcnow()
  # Span every id that may matter: from the oldest stored build up to
  # builds created within the next day.
  space_start, space_end = model.build_id_range(
      now - model.BUILD_STORAGE_DURATION,
      now + datetime.timedelta(days=1),
  )
  assert space_end <= _MAX_BUILD_ID
  space_size = space_end - space_start + 1
  logging.info(
      'build space [%d..%d], size %d, %d shards',
      space_start,
      space_end,
      space_size,
      int(math.ceil(float(space_size) / SEGMENT_SIZE)),
  )

  tasks = []
  seg_start = space_start
  while seg_start <= space_end:
    # Each segment covers SEGMENT_SIZE ids, clamped to the id space limit.
    seg_end = min(_MAX_BUILD_ID, seg_start + SEGMENT_SIZE - 1)
    tasks.append((
        None,
        'segment/seg:{seg_index}-percent:0',
        {
            'job_id': self.request.headers['X-AppEngine-TaskName'],
            'iteration': 0,
            'seg_index': len(tasks),
            'seg_start': seg_start,
            'seg_end': seg_end,
            'started_ts': utils.datetime_to_timestamp(utils.utcnow()),
            'proc': proc,
        },
    ))
    seg_start = seg_end + 1
  self._recurse(tasks)
  logging.info('enqueued %d segment tasks with proc %r', len(tasks), proc)
def delete_builds():
  """Finds very old builds and deletes them and their children.

  Very old is defined by model.BUILD_STORAGE_DURATION.
  """

  @ndb.transactional_tasklet
  def delete_one_async(build_key):
    # Gather the build key plus every descendant entity key, then delete
    # them all within one transaction.
    doomed = [build_key]
    for child_cls in model.BUILD_CHILD_CLASSES:
      child_keys = yield child_cls.query(ancestor=build_key).fetch_async(
          keys_only=True
      )
      doomed.extend(child_keys)
    yield ndb.delete_multi_async(doomed)

  # Build keys encode creation time, so a single key inequality selects
  # the builds past the storage horizon.
  id_low, _ = model.build_id_range(
      None, utils.utcnow() - model.BUILD_STORAGE_DURATION
  )
  query = model.Build.query(model.Build.key > ndb.Key(model.Build, id_low))
  deleted = query.map_async(
      delete_one_async, keys_only=True, limit=1000
  ).get_result()
  logging.info('Deleted %d builds', len(deleted))
def expire_builds():
  """Finds old incomplete builds and marks them as TIMEOUT."""
  expected_statuses = (common_pb2.SCHEDULED, common_pb2.STARTED)

  @ndb.transactional_tasklet
  def expire_async(build_key):
    now = utils.utcnow()
    build = yield build_key.get_async()
    # Skip builds that vanished or completed since the query ran.
    still_pending = build and build.status in expected_statuses
    if not still_pending:
      raise ndb.Return(False, build)  # pragma: no cover

    build.clear_lease()
    build.proto.status = common_pb2.INFRA_FAILURE
    build.proto.status_details.timeout.SetInParent()
    build.proto.end_time.FromDatetime(now)
    build.status_changed_time = now
    # Persist the build and fire the "completing" event concurrently.
    yield build.put_async(), events.on_build_completing_async(build)
    raise ndb.Return(True, build)

  @ndb.tasklet
  def handle_async(build_key):
    # The only yield in this function; not performance-critical.
    expired, build = yield expire_async(build_key)
    if expired:  # pragma: no branch
      events.on_build_completed(build)

  # Utilize time-based build keys.
  id_low, _ = model.build_id_range(None, utils.utcnow() - model.BUILD_TIMEOUT)
  query = model.Build.query(
      model.Build.key > ndb.Key(model.Build, id_low),
      # Cannot use >1 inequality filters per query.
      model.Build.status.IN(expected_statuses),
  )
  query.map_async(handle_async, keys_only=True).get_result()